From ede4446301d78c326b4a775bfd53fd19408b54bf Mon Sep 17 00:00:00 2001
From: ArthurHlt
Date: Wed, 12 Jun 2019 18:47:09 +0200
Subject: [PATCH] use Go modules and Terraform 0.12

---
 go.mod | 51 + go.sum | 464 + .../github.com/apparentlymart/go-cidr/LICENSE | 19 - .../apparentlymart/go-cidr/cidr/cidr.go | 210 - .../apparentlymart/go-cidr/cidr/wrangling.go | 38 - vendor/github.com/armon/go-radix/LICENSE | 20 - vendor/github.com/armon/go-radix/README.md | 38 - vendor/github.com/armon/go-radix/radix.go | 543 - vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 - vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 - .../aws/aws-sdk-go/aws/awserr/error.go | 145 - .../aws/aws-sdk-go/aws/awserr/types.go | 194 - .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 - .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 - .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 - .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 - .../aws-sdk-go/aws/awsutil/string_value.go | 89 - .../aws/aws-sdk-go/aws/client/client.go | 90 - .../aws-sdk-go/aws/client/default_retryer.go | 96 - .../aws/aws-sdk-go/aws/client/logger.go | 108 - .../aws/client/metadata/client_info.go | 12 - .../github.com/aws/aws-sdk-go/aws/config.go | 470 - .../github.com/aws/aws-sdk-go/aws/context.go | 71 - .../aws/aws-sdk-go/aws/context_1_6.go | 41 - .../aws/aws-sdk-go/aws/context_1_7.go | 9 - .../aws/aws-sdk-go/aws/convert_types.go | 369 - .../aws-sdk-go/aws/corehandlers/handlers.go | 242 - .../aws/corehandlers/param_validator.go | 17 - .../aws/credentials/chain_provider.go | 102 - .../aws-sdk-go/aws/credentials/credentials.go | 246 - .../ec2rolecreds/ec2_role_provider.go | 178 - .../aws/credentials/endpointcreds/provider.go | 191 - .../aws/credentials/env_provider.go | 78 - .../aws-sdk-go/aws/credentials/example.ini | 12 - .../shared_credentials_provider.go | 150 - .../aws/credentials/static_provider.go | 57 - .../stscreds/assume_role_provider.go | 298 - .../aws/aws-sdk-go/aws/defaults/defaults.go | 163 - .../aws-sdk-go/aws/defaults/shared_config.go | 27 - vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 - .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 - .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 - .../aws/aws-sdk-go/aws/endpoints/decode.go | 133 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 2331 -- .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 - .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 439 - .../aws/aws-sdk-go/aws/endpoints/v3model.go | 303 - .../aws/endpoints/v3model_codegen.go | 337 - .../github.com/aws/aws-sdk-go/aws/errors.go | 17 - .../aws/aws-sdk-go/aws/jsonvalue.go | 12 - .../github.com/aws/aws-sdk-go/aws/logger.go | 112 - .../aws/request/connection_reset_error.go | 19 - .../request/connection_reset_error_other.go | 11 - .../aws/aws-sdk-go/aws/request/handlers.go | 256 - .../aws-sdk-go/aws/request/http_request.go | 24 - .../aws-sdk-go/aws/request/offset_reader.go | 58 - .../aws/aws-sdk-go/aws/request/request.go | 575 - .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 - .../aws/aws-sdk-go/aws/request/request_1_8.go | 33 - .../aws-sdk-go/aws/request/request_context.go | 14 - .../aws/request/request_context_1_6.go | 14 - .../aws/request/request_pagination.go | 236 - .../aws/aws-sdk-go/aws/request/retryer.go | 161 - .../aws/request/timeout_read_closer.go | 94 - .../aws/aws-sdk-go/aws/request/validation.go | 234 - .../aws/aws-sdk-go/aws/request/waiter.go | 287 - .../aws/aws-sdk-go/aws/session/doc.go | 273 - .../aws/aws-sdk-go/aws/session/env_config.go | 188 - .../aws/aws-sdk-go/aws/session/session.go | 606 -
.../aws-sdk-go/aws/session/shared_config.go | 295 - .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 - .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 - .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 - .../aws/aws-sdk-go/aws/signer/v4/v4.go | 761 - vendor/github.com/aws/aws-sdk-go/aws/types.go | 118 - vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 - .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 - .../github.com/aws/aws-sdk-go/aws/version.go | 8 - .../internal/shareddefaults/shared_config.go | 40 - .../aws-sdk-go/private/endpoints/endpoints.go | 65 - .../private/endpoints/endpoints.json | 75 - .../private/endpoints/endpoints_map.go | 88 - .../private/protocol/idempotency.go | 75 - .../private/protocol/query/build.go | 36 - .../protocol/query/queryutil/queryutil.go | 237 - .../private/protocol/query/unmarshal.go | 35 - .../private/protocol/query/unmarshal_error.go | 66 - .../aws-sdk-go/private/protocol/rest/build.go | 290 - .../private/protocol/rest/payload.go | 45 - .../private/protocol/rest/unmarshal.go | 227 - .../private/protocol/restxml/restxml.go | 69 - .../aws-sdk-go/private/protocol/unmarshal.go | 21 - .../private/protocol/xml/xmlutil/build.go | 296 - .../private/protocol/xml/xmlutil/unmarshal.go | 260 - .../protocol/xml/xmlutil/xml_to_struct.go | 147 - .../private/signer/v4/header_rules.go | 82 - .../aws/aws-sdk-go/private/signer/v4/v4.go | 465 - .../aws/aws-sdk-go/private/waiter/waiter.go | 134 - .../aws/aws-sdk-go/service/s3/api.go | 19245 ---------------- .../aws-sdk-go/service/s3/bucket_location.go | 106 - .../aws/aws-sdk-go/service/s3/content_md5.go | 36 - .../aws-sdk-go/service/s3/customizations.go | 46 - .../aws/aws-sdk-go/service/s3/doc.go | 78 - .../aws/aws-sdk-go/service/s3/doc_custom.go | 109 - .../aws/aws-sdk-go/service/s3/errors.go | 48 - .../service/s3/host_style_bucket.go | 162 - .../service/s3/platform_handlers.go | 8 - .../service/s3/platform_handlers_go1.6.go | 28 - .../aws/aws-sdk-go/service/s3/service.go | 93 - .../aws/aws-sdk-go/service/s3/sse.go | 44 - .../aws-sdk-go/service/s3/statusok_error.go | 35 - .../aws-sdk-go/service/s3/unmarshal_error.go | 103 - .../aws/aws-sdk-go/service/s3/waiters.go | 214 - .../aws/aws-sdk-go/service/sts/api.go | 2365 -- .../aws-sdk-go/service/sts/customizations.go | 12 - .../aws/aws-sdk-go/service/sts/doc.go | 124 - .../aws/aws-sdk-go/service/sts/errors.go | 73 - .../aws/aws-sdk-go/service/sts/service.go | 93 - vendor/github.com/bgentry/go-netrc/LICENSE | 20 - .../bgentry/go-netrc/netrc/netrc.go | 510 - vendor/github.com/bgentry/speakeasy/LICENSE | 24 - .../bgentry/speakeasy/LICENSE_WINDOWS | 201 - vendor/github.com/bgentry/speakeasy/Readme.md | 30 - .../github.com/bgentry/speakeasy/speakeasy.go | 49 - .../bgentry/speakeasy/speakeasy_unix.go | 93 - .../bgentry/speakeasy/speakeasy_windows.go | 41 - vendor/github.com/blang/semver/LICENSE | 22 - vendor/github.com/blang/semver/README.md | 194 - vendor/github.com/blang/semver/json.go | 23 - vendor/github.com/blang/semver/package.json | 17 - vendor/github.com/blang/semver/range.go | 416 - vendor/github.com/blang/semver/semver.go | 418 - vendor/github.com/blang/semver/sort.go | 28 - vendor/github.com/blang/semver/sql.go | 30 - vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 152 - .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 - .../github.com/davecgh/go-spew/spew/config.go | 306 - vendor/github.com/davecgh/go-spew/spew/doc.go | 211 - .../github.com/davecgh/go-spew/spew/dump.go | 
509 - .../github.com/davecgh/go-spew/spew/format.go | 419 - .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/go-ini/ini/LICENSE | 191 - vendor/github.com/go-ini/ini/Makefile | 12 - vendor/github.com/go-ini/ini/README.md | 746 - vendor/github.com/go-ini/ini/README_ZH.md | 733 - vendor/github.com/go-ini/ini/error.go | 32 - vendor/github.com/go-ini/ini/ini.go | 561 - vendor/github.com/go-ini/ini/key.go | 699 - vendor/github.com/go-ini/ini/parser.go | 361 - vendor/github.com/go-ini/ini/section.go | 248 - vendor/github.com/go-ini/ini/struct.go | 500 - vendor/github.com/golang/protobuf/LICENSE | 31 - .../github.com/golang/protobuf/proto/Makefile | 43 - .../github.com/golang/protobuf/proto/clone.go | 229 - .../golang/protobuf/proto/decode.go | 970 - .../golang/protobuf/proto/encode.go | 1362 -- .../github.com/golang/protobuf/proto/equal.go | 300 - .../golang/protobuf/proto/extensions.go | 587 - .../github.com/golang/protobuf/proto/lib.go | 897 - .../golang/protobuf/proto/message_set.go | 311 - .../golang/protobuf/proto/pointer_reflect.go | 484 - .../golang/protobuf/proto/pointer_unsafe.go | 270 - .../golang/protobuf/proto/properties.go | 872 - .../github.com/golang/protobuf/proto/text.go | 854 - .../golang/protobuf/proto/text_parser.go | 895 - .../github.com/golang/protobuf/ptypes/any.go | 139 - .../golang/protobuf/ptypes/any/any.pb.go | 168 - .../golang/protobuf/ptypes/any/any.proto | 139 - .../github.com/golang/protobuf/ptypes/doc.go | 35 - .../golang/protobuf/ptypes/duration.go | 102 - .../protobuf/ptypes/duration/duration.pb.go | 144 - .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/regen.sh | 43 - .../golang/protobuf/ptypes/timestamp.go | 134 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 160 - .../protobuf/ptypes/timestamp/timestamp.proto | 133 - .../github.com/gucumber/gucumber/LICENSE.txt | 22 - .../gucumber/gucumber/gherkin/i18n.go | 56 - .../gucumber/gucumber/gherkin/parser.go | 353 - .../gucumber/gucumber/gherkin/types.go | 226 - vendor/github.com/hashicorp/errwrap/LICENSE | 354 - vendor/github.com/hashicorp/errwrap/README.md | 89 - .../github.com/hashicorp/errwrap/errwrap.go | 169 - .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 - .../hashicorp/go-cleanhttp/README.md | 30 - .../hashicorp/go-cleanhttp/cleanhttp.go | 56 - .../github.com/hashicorp/go-cleanhttp/doc.go | 20 - vendor/github.com/hashicorp/go-getter/LICENSE | 354 - .../github.com/hashicorp/go-getter/README.md | 257 - .../hashicorp/go-getter/appveyor.yml | 16 - .../github.com/hashicorp/go-getter/client.go | 335 - .../hashicorp/go-getter/client_mode.go | 24 - .../hashicorp/go-getter/copy_dir.go | 78 - .../hashicorp/go-getter/decompress.go | 29 - .../hashicorp/go-getter/decompress_bzip2.go | 45 - .../hashicorp/go-getter/decompress_gzip.go | 49 - .../hashicorp/go-getter/decompress_tar.go | 83 - .../hashicorp/go-getter/decompress_tbz2.go | 33 - .../hashicorp/go-getter/decompress_testing.go | 135 - .../hashicorp/go-getter/decompress_tgz.go | 39 - .../hashicorp/go-getter/decompress_zip.go | 96 - .../github.com/hashicorp/go-getter/detect.go | 97 - .../hashicorp/go-getter/detect_bitbucket.go | 66 - .../hashicorp/go-getter/detect_file.go | 67 - .../hashicorp/go-getter/detect_github.go | 73 - .../hashicorp/go-getter/detect_s3.go | 61 - .../hashicorp/go-getter/folder_storage.go | 65 - vendor/github.com/hashicorp/go-getter/get.go | 139 - .../hashicorp/go-getter/get_file.go | 32 - .../hashicorp/go-getter/get_file_unix.go | 103 - .../hashicorp/go-getter/get_file_windows.go | 120 - 
.../github.com/hashicorp/go-getter/get_git.go | 225 - .../github.com/hashicorp/go-getter/get_hg.go | 131 - .../hashicorp/go-getter/get_http.go | 227 - .../hashicorp/go-getter/get_mock.go | 52 - .../github.com/hashicorp/go-getter/get_s3.go | 243 - .../hashicorp/go-getter/helper/url/url.go | 14 - .../go-getter/helper/url/url_unix.go | 11 - .../go-getter/helper/url/url_windows.go | 40 - .../github.com/hashicorp/go-getter/netrc.go | 67 - .../github.com/hashicorp/go-getter/source.go | 36 - .../github.com/hashicorp/go-getter/storage.go | 13 - vendor/github.com/hashicorp/go-hclog/LICENSE | 21 - .../github.com/hashicorp/go-hclog/README.md | 123 - .../github.com/hashicorp/go-hclog/global.go | 34 - vendor/github.com/hashicorp/go-hclog/int.go | 385 - vendor/github.com/hashicorp/go-hclog/log.go | 138 - .../hashicorp/go-hclog/stacktrace.go | 108 - .../github.com/hashicorp/go-hclog/stdlog.go | 62 - .../hashicorp/go-multierror/LICENSE | 353 - .../hashicorp/go-multierror/Makefile | 31 - .../hashicorp/go-multierror/README.md | 97 - .../hashicorp/go-multierror/append.go | 41 - .../hashicorp/go-multierror/flatten.go | 26 - .../hashicorp/go-multierror/format.go | 27 - .../hashicorp/go-multierror/multierror.go | 51 - .../hashicorp/go-multierror/prefix.go | 37 - vendor/github.com/hashicorp/go-plugin/LICENSE | 353 - .../github.com/hashicorp/go-plugin/README.md | 168 - .../github.com/hashicorp/go-plugin/client.go | 772 - .../hashicorp/go-plugin/discover.go | 28 - .../github.com/hashicorp/go-plugin/error.go | 24 - .../hashicorp/go-plugin/grpc_client.go | 83 - .../hashicorp/go-plugin/grpc_server.go | 115 - .../hashicorp/go-plugin/log_entry.go | 73 - .../hashicorp/go-plugin/mux_broker.go | 204 - .../github.com/hashicorp/go-plugin/plugin.go | 56 - .../github.com/hashicorp/go-plugin/process.go | 24 - .../hashicorp/go-plugin/process_posix.go | 19 - .../hashicorp/go-plugin/process_windows.go | 29 - .../hashicorp/go-plugin/protocol.go | 45 - .../hashicorp/go-plugin/rpc_client.go | 170 - .../hashicorp/go-plugin/rpc_server.go | 197 - .../github.com/hashicorp/go-plugin/server.go | 310 - .../hashicorp/go-plugin/server_mux.go | 31 - .../github.com/hashicorp/go-plugin/stream.go | 18 - .../github.com/hashicorp/go-plugin/testing.go | 120 - vendor/github.com/hashicorp/go-uuid/LICENSE | 363 - vendor/github.com/hashicorp/go-uuid/README.md | 8 - vendor/github.com/hashicorp/go-uuid/uuid.go | 65 - .../github.com/hashicorp/go-version/LICENSE | 354 - .../github.com/hashicorp/go-version/README.md | 65 - .../hashicorp/go-version/constraint.go | 178 - .../hashicorp/go-version/version.go | 322 - .../go-version/version_collection.go | 17 - vendor/github.com/hashicorp/hcl/LICENSE | 354 - vendor/github.com/hashicorp/hcl/Makefile | 18 - vendor/github.com/hashicorp/hcl/README.md | 125 - vendor/github.com/hashicorp/hcl/appveyor.yml | 19 - vendor/github.com/hashicorp/hcl/decoder.go | 724 - vendor/github.com/hashicorp/hcl/hcl.go | 11 - .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 - .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 - .../hashicorp/hcl/hcl/parser/error.go | 17 - .../hashicorp/hcl/hcl/parser/parser.go | 520 - .../hashicorp/hcl/hcl/scanner/scanner.go | 651 - .../hashicorp/hcl/hcl/strconv/quote.go | 241 - .../hashicorp/hcl/hcl/token/position.go | 46 - .../hashicorp/hcl/hcl/token/token.go | 219 - .../hashicorp/hcl/json/parser/flatten.go | 117 - .../hashicorp/hcl/json/parser/parser.go | 313 - .../hashicorp/hcl/json/scanner/scanner.go | 451 - .../hashicorp/hcl/json/token/position.go | 46 - .../hashicorp/hcl/json/token/token.go | 118 - 
vendor/github.com/hashicorp/hcl/lex.go | 38 - vendor/github.com/hashicorp/hcl/parse.go | 39 - .../hashicorp/hcl/testhelper/unix2dos.go | 15 - vendor/github.com/hashicorp/hil/LICENSE | 353 - vendor/github.com/hashicorp/hil/README.md | 102 - vendor/github.com/hashicorp/hil/appveyor.yml | 18 - .../hashicorp/hil/ast/arithmetic.go | 43 - .../hashicorp/hil/ast/arithmetic_op.go | 24 - vendor/github.com/hashicorp/hil/ast/ast.go | 99 - vendor/github.com/hashicorp/hil/ast/call.go | 47 - .../hashicorp/hil/ast/conditional.go | 36 - vendor/github.com/hashicorp/hil/ast/index.go | 76 - .../github.com/hashicorp/hil/ast/literal.go | 88 - vendor/github.com/hashicorp/hil/ast/output.go | 78 - vendor/github.com/hashicorp/hil/ast/scope.go | 90 - vendor/github.com/hashicorp/hil/ast/stack.go | 25 - .../hashicorp/hil/ast/type_string.go | 54 - .../github.com/hashicorp/hil/ast/unknown.go | 30 - .../hashicorp/hil/ast/variable_access.go | 36 - .../hashicorp/hil/ast/variables_helper.go | 63 - vendor/github.com/hashicorp/hil/builtins.go | 331 - .../hashicorp/hil/check_identifier.go | 88 - .../github.com/hashicorp/hil/check_types.go | 668 - vendor/github.com/hashicorp/hil/convert.go | 159 - vendor/github.com/hashicorp/hil/eval.go | 472 - vendor/github.com/hashicorp/hil/eval_type.go | 16 - .../hashicorp/hil/evaltype_string.go | 42 - vendor/github.com/hashicorp/hil/parse.go | 29 - .../hashicorp/hil/parser/binary_op.go | 45 - .../github.com/hashicorp/hil/parser/error.go | 38 - .../github.com/hashicorp/hil/parser/fuzz.go | 28 - .../github.com/hashicorp/hil/parser/parser.go | 522 - .../hashicorp/hil/scanner/peeker.go | 55 - .../hashicorp/hil/scanner/scanner.go | 556 - .../github.com/hashicorp/hil/scanner/token.go | 105 - .../hashicorp/hil/scanner/tokentype_string.go | 51 - .../hashicorp/hil/transform_fixed.go | 29 - vendor/github.com/hashicorp/hil/walk.go | 266 - vendor/github.com/hashicorp/logutils/LICENSE | 354 - .../github.com/hashicorp/logutils/README.md | 36 - vendor/github.com/hashicorp/logutils/level.go | 81 - vendor/github.com/hashicorp/terraform/LICENSE | 354 - .../hashicorp/terraform/config/append.go | 86 - .../hashicorp/terraform/config/config.go | 1074 - .../terraform/config/config_string.go | 338 - .../terraform/config/config_terraform.go | 117 - .../hashicorp/terraform/config/config_tree.go | 43 - .../hashicorp/terraform/config/import_tree.go | 113 - .../hashicorp/terraform/config/interpolate.go | 386 - .../terraform/config/interpolate_funcs.go | 1455 -- .../terraform/config/interpolate_walk.go | 283 - .../hashicorp/terraform/config/lang.go | 11 - .../hashicorp/terraform/config/loader.go | 224 - .../hashicorp/terraform/config/loader_hcl.go | 1160 - .../hashicorp/terraform/config/merge.go | 193 - .../terraform/config/module/copy_dir.go | 114 - .../hashicorp/terraform/config/module/get.go | 71 - .../terraform/config/module/inode.go | 21 - .../terraform/config/module/inode_freebsd.go | 21 - .../terraform/config/module/inode_windows.go | 8 - .../terraform/config/module/module.go | 7 - .../terraform/config/module/testing.go | 38 - .../hashicorp/terraform/config/module/tree.go | 447 - .../terraform/config/module/tree_gob.go | 57 - .../config/module/validate_provider_alias.go | 118 - .../hashicorp/terraform/config/providers.go | 103 - .../terraform/config/provisioner_enums.go | 40 - .../hashicorp/terraform/config/raw_config.go | 335 - .../terraform/config/resource_mode.go | 9 - .../terraform/config/resource_mode_string.go | 16 - .../hashicorp/terraform/config/testing.go | 15 - 
.../github.com/hashicorp/terraform/dag/dag.go | 286 - .../github.com/hashicorp/terraform/dag/dot.go | 282 - .../hashicorp/terraform/dag/edge.go | 37 - .../hashicorp/terraform/dag/graph.go | 391 - .../hashicorp/terraform/dag/marshal.go | 462 - .../github.com/hashicorp/terraform/dag/set.go | 123 - .../hashicorp/terraform/dag/tarjan.go | 107 - .../hashicorp/terraform/dag/walk.go | 445 - .../hashicorp/terraform/flatmap/expand.go | 152 - .../hashicorp/terraform/flatmap/flatten.go | 71 - .../hashicorp/terraform/flatmap/map.go | 82 - .../terraform/helper/acctest/acctest.go | 2 - .../terraform/helper/acctest/random.go | 145 - .../terraform/helper/acctest/remotetests.go | 27 - .../terraform/helper/config/decode.go | 28 - .../terraform/helper/config/validator.go | 214 - .../terraform/helper/experiment/experiment.go | 154 - .../terraform/helper/experiment/id.go | 34 - .../terraform/helper/hashcode/hashcode.go | 22 - .../helper/hilmapstructure/hilmapstructure.go | 41 - .../terraform/helper/logging/logging.go | 100 - .../terraform/helper/logging/transport.go | 53 - .../terraform/helper/mutexkv/mutexkv.go | 51 - .../terraform/helper/resource/error.go | 79 - .../hashicorp/terraform/helper/resource/id.go | 40 - .../terraform/helper/resource/map.go | 140 - .../terraform/helper/resource/resource.go | 49 - .../terraform/helper/resource/state.go | 259 - .../terraform/helper/resource/testing.go | 958 - .../helper/resource/testing_config.go | 160 - .../helper/resource/testing_import_state.go | 141 - .../terraform/helper/resource/wait.go | 84 - .../terraform/helper/schema/README.md | 11 - .../terraform/helper/schema/backend.go | 94 - .../schema/data_source_resource_shim.go | 59 - .../terraform/helper/schema/equal.go | 6 - .../terraform/helper/schema/field_reader.go | 334 - .../helper/schema/field_reader_config.go | 333 - .../helper/schema/field_reader_diff.go | 238 - .../helper/schema/field_reader_map.go | 232 - .../helper/schema/field_reader_multi.go | 63 - .../terraform/helper/schema/field_writer.go | 8 - .../helper/schema/field_writer_map.go | 319 - .../helper/schema/getsource_string.go | 36 - .../terraform/helper/schema/provider.go | 417 - .../terraform/helper/schema/provisioner.go | 204 - .../terraform/helper/schema/resource.go | 501 - .../terraform/helper/schema/resource_data.go | 518 - .../helper/schema/resource_data_get_source.go | 17 - .../helper/schema/resource_importer.go | 52 - .../helper/schema/resource_timeout.go | 237 - .../terraform/helper/schema/schema.go | 1549 -- .../terraform/helper/schema/serialize.go | 125 - .../hashicorp/terraform/helper/schema/set.go | 234 - .../terraform/helper/schema/testing.go | 30 - .../terraform/helper/schema/valuetype.go | 21 - .../helper/schema/valuetype_string.go | 16 - .../terraform/helper/shadow/closer.go | 80 - .../terraform/helper/shadow/compared_value.go | 128 - .../terraform/helper/shadow/keyed_value.go | 151 - .../terraform/helper/shadow/ordered_value.go | 66 - .../terraform/helper/shadow/value.go | 79 - .../terraform/moduledeps/dependencies.go | 43 - .../hashicorp/terraform/moduledeps/doc.go | 7 - .../hashicorp/terraform/moduledeps/module.go | 204 - .../terraform/moduledeps/provider.go | 30 - .../hashicorp/terraform/plugin/client.go | 33 - .../terraform/plugin/discovery/error.go | 30 - .../terraform/plugin/discovery/find.go | 167 - .../terraform/plugin/discovery/get.go | 432 - .../terraform/plugin/discovery/meta.go | 41 - .../terraform/plugin/discovery/meta_set.go | 195 - .../plugin/discovery/requirements.go | 105 - 
.../terraform/plugin/discovery/signature.go | 53 - .../terraform/plugin/discovery/version.go | 72 - .../terraform/plugin/discovery/version_set.go | 84 - .../hashicorp/terraform/plugin/plugin.go | 13 - .../terraform/plugin/resource_provider.go | 578 - .../terraform/plugin/resource_provisioner.go | 173 - .../hashicorp/terraform/plugin/serve.go | 54 - .../hashicorp/terraform/plugin/ui_input.go | 51 - .../hashicorp/terraform/plugin/ui_output.go | 29 - .../hashicorp/terraform/terraform/context.go | 1048 - .../terraform/terraform/context_components.go | 65 - .../terraform/terraform/context_graph_type.go | 32 - .../terraform/terraform/context_import.go | 77 - .../hashicorp/terraform/terraform/debug.go | 523 - .../hashicorp/terraform/terraform/diff.go | 866 - .../terraform/terraform/edge_destroy.go | 17 - .../hashicorp/terraform/terraform/eval.go | 63 - .../terraform/terraform/eval_apply.go | 359 - .../terraform/eval_check_prevent_destroy.go | 38 - .../terraform/terraform/eval_context.go | 84 - .../terraform/eval_context_builtin.go | 347 - .../terraform/terraform/eval_context_mock.go | 208 - .../terraform/terraform/eval_count.go | 58 - .../terraform/eval_count_boundary.go | 78 - .../terraform/eval_count_computed.go | 25 - .../terraform/terraform/eval_diff.go | 490 - .../terraform/terraform/eval_error.go | 20 - .../terraform/terraform/eval_filter.go | 25 - .../terraform/eval_filter_operation.go | 49 - .../hashicorp/terraform/terraform/eval_if.go | 26 - .../terraform/terraform/eval_import_state.go | 76 - .../terraform/terraform/eval_interpolate.go | 53 - .../terraform/terraform/eval_noop.go | 8 - .../terraform/terraform/eval_output.go | 119 - .../terraform/terraform/eval_provider.go | 164 - .../terraform/terraform/eval_provisioner.go | 47 - .../terraform/terraform/eval_read_data.go | 139 - .../terraform/terraform/eval_refresh.go | 55 - .../terraform/terraform/eval_resource.go | 13 - .../terraform/terraform/eval_sequence.go | 27 - .../terraform/terraform/eval_state.go | 326 - .../terraform/terraform/eval_validate.go | 227 - .../terraform/eval_validate_selfref.go | 74 - .../terraform/terraform/eval_variable.go | 279 - .../terraform/terraform/evaltree_provider.go | 119 - .../hashicorp/terraform/terraform/graph.go | 172 - .../terraform/terraform/graph_builder.go | 77 - .../terraform/graph_builder_apply.go | 141 - .../terraform/graph_builder_destroy_plan.go | 67 - .../terraform/graph_builder_import.go | 76 - .../terraform/graph_builder_input.go | 30 - .../terraform/terraform/graph_builder_plan.go | 178 - .../terraform/graph_builder_refresh.go | 171 - .../terraform/graph_builder_validate.go | 36 - .../terraform/terraform/graph_dot.go | 9 - .../terraform/graph_interface_subgraph.go | 7 - .../terraform/terraform/graph_walk.go | 60 - .../terraform/terraform/graph_walk_context.go | 157 - .../terraform/graph_walk_operation.go | 18 - .../terraform/terraform/graphtype_string.go | 16 - .../hashicorp/terraform/terraform/hook.go | 137 - .../terraform/terraform/hook_mock.go | 245 - .../terraform/terraform/hook_stop.go | 87 - .../terraform/terraform/instancetype.go | 13 - .../terraform/instancetype_string.go | 16 - .../terraform/terraform/interpolate.go | 799 - .../terraform/module_dependencies.go | 156 - .../terraform/node_count_boundary.go | 14 - .../terraform/terraform/node_data_destroy.go | 22 - .../terraform/terraform/node_data_refresh.go | 218 - .../terraform/node_module_destroy.go | 29 - .../terraform/node_module_variable.go | 140 - .../terraform/terraform/node_output.go | 85 - 
.../terraform/terraform/node_output_orphan.go | 35 - .../terraform/terraform/node_provider.go | 11 - .../terraform/node_provider_abstract.go | 85 - .../terraform/node_provider_disabled.go | 38 - .../terraform/terraform/node_provisioner.go | 44 - .../terraform/node_resource_abstract.go | 240 - .../terraform/node_resource_abstract_count.go | 50 - .../terraform/node_resource_apply.go | 357 - .../terraform/node_resource_destroy.go | 288 - .../terraform/terraform/node_resource_plan.go | 83 - .../terraform/node_resource_plan_destroy.go | 53 - .../terraform/node_resource_plan_instance.go | 190 - .../terraform/node_resource_plan_orphan.go | 54 - .../terraform/node_resource_refresh.go | 259 - .../terraform/node_resource_validate.go | 158 - .../terraform/terraform/node_root_variable.go | 22 - .../hashicorp/terraform/terraform/path.go | 24 - .../hashicorp/terraform/terraform/plan.go | 196 - .../hashicorp/terraform/terraform/resource.go | 360 - .../terraform/terraform/resource_address.go | 471 - .../terraform/terraform/resource_provider.go | 285 - .../terraform/resource_provider_mock.go | 297 - .../terraform/resource_provisioner.go | 54 - .../terraform/resource_provisioner_mock.go | 72 - .../terraform/terraform/semantics.go | 132 - .../hashicorp/terraform/terraform/shadow.go | 28 - .../terraform/terraform/shadow_components.go | 273 - .../terraform/terraform/shadow_context.go | 158 - .../terraform/shadow_resource_provider.go | 815 - .../terraform/shadow_resource_provisioner.go | 282 - .../hashicorp/terraform/terraform/state.go | 2136 -- .../terraform/terraform/state_add.go | 374 - .../terraform/terraform/state_filter.go | 267 - .../terraform/state_upgrade_v1_to_v2.go | 189 - .../terraform/state_upgrade_v2_to_v3.go | 142 - .../hashicorp/terraform/terraform/state_v1.go | 145 - .../terraform/terraform/test_failure | 9 - .../hashicorp/terraform/terraform/testing.go | 19 - .../terraform/terraform/transform.go | 52 - .../transform_attach_config_provider.go | 80 - .../transform_attach_config_resource.go | 78 - .../terraform/transform_attach_state.go | 68 - .../terraform/terraform/transform_config.go | 135 - .../terraform/transform_config_flat.go | 80 - .../terraform/transform_config_old.go | 23 - .../terraform/transform_count_boundary.go | 28 - .../terraform/terraform/transform_deposed.go | 168 - .../terraform/transform_destroy_cbd.go | 257 - .../terraform/transform_destroy_edge.go | 269 - .../terraform/terraform/transform_diff.go | 86 - .../terraform/terraform/transform_expand.go | 48 - .../terraform/transform_import_provider.go | 38 - .../terraform/transform_import_state.go | 241 - .../terraform/transform_module_variable.go | 122 - .../terraform/transform_orphan_count.go | 110 - .../terraform/transform_orphan_output.go | 64 - .../terraform/transform_orphan_resource.go | 78 - .../terraform/terraform/transform_output.go | 59 - .../terraform/terraform/transform_provider.go | 380 - .../terraform/transform_provider_disable.go | 50 - .../terraform/transform_provisioner.go | 206 - .../terraform/transform_reference.go | 321 - .../terraform/transform_resource_count.go | 51 - .../terraform/terraform/transform_root.go | 38 - .../terraform/terraform/transform_state.go | 65 - .../terraform/terraform/transform_targets.go | 230 - .../transform_transitive_reduction.go | 20 - .../terraform/terraform/transform_variable.go | 40 - .../terraform/terraform/transform_vertex.go | 44 - .../hashicorp/terraform/terraform/ui_input.go | 26 - .../terraform/terraform/ui_input_mock.go | 23 - .../terraform/terraform/ui_input_prefix.go | 19 
- .../terraform/terraform/ui_output.go | 7 - .../terraform/terraform/ui_output_callback.go | 9 - .../terraform/terraform/ui_output_mock.go | 16 - .../terraform/ui_output_provisioner.go | 15 - .../terraform/terraform/user_agent.go | 14 - .../hashicorp/terraform/terraform/util.go | 81 - .../terraform/terraform/variables.go | 166 - .../hashicorp/terraform/terraform/version.go | 31 - .../terraform/terraform/version_required.go | 69 - .../terraform/walkoperation_string.go | 16 - vendor/github.com/hashicorp/yamux/LICENSE | 362 - vendor/github.com/hashicorp/yamux/README.md | 86 - vendor/github.com/hashicorp/yamux/addr.go | 60 - vendor/github.com/hashicorp/yamux/const.go | 157 - vendor/github.com/hashicorp/yamux/mux.go | 87 - vendor/github.com/hashicorp/yamux/session.go | 598 - vendor/github.com/hashicorp/yamux/spec.md | 141 - vendor/github.com/hashicorp/yamux/stream.go | 452 - vendor/github.com/hashicorp/yamux/util.go | 28 - .../github.com/jmespath/go-jmespath/LICENSE | 13 - .../github.com/jmespath/go-jmespath/Makefile | 44 - .../github.com/jmespath/go-jmespath/README.md | 7 - vendor/github.com/jmespath/go-jmespath/api.go | 49 - .../go-jmespath/astnodetype_string.go | 16 - .../jmespath/go-jmespath/functions.go | 842 - .../jmespath/go-jmespath/interpreter.go | 418 - .../github.com/jmespath/go-jmespath/lexer.go | 420 - .../github.com/jmespath/go-jmespath/parser.go | 603 - .../jmespath/go-jmespath/toktype_string.go | 16 - .../github.com/jmespath/go-jmespath/util.go | 185 - vendor/github.com/lsegal/gucumber/LICENSE.txt | 22 - vendor/github.com/lsegal/gucumber/Makefile | 11 - vendor/github.com/lsegal/gucumber/README.md | 113 - vendor/github.com/lsegal/gucumber/builder.go | 160 - vendor/github.com/lsegal/gucumber/run_main.go | 44 - vendor/github.com/lsegal/gucumber/runner.go | 419 - vendor/github.com/lsegal/gucumber/stepdef.go | 224 - vendor/github.com/mattn/go-isatty/LICENSE | 9 - vendor/github.com/mattn/go-isatty/README.md | 50 - vendor/github.com/mattn/go-isatty/doc.go | 2 - .../mattn/go-isatty/isatty_appengine.go | 15 - .../github.com/mattn/go-isatty/isatty_bsd.go | 18 - .../mattn/go-isatty/isatty_linux.go | 18 - .../mattn/go-isatty/isatty_others.go | 10 - .../mattn/go-isatty/isatty_solaris.go | 16 - .../mattn/go-isatty/isatty_windows.go | 94 - vendor/github.com/mitchellh/cli/LICENSE | 354 - vendor/github.com/mitchellh/cli/Makefile | 20 - vendor/github.com/mitchellh/cli/README.md | 67 - .../github.com/mitchellh/cli/autocomplete.go | 43 - vendor/github.com/mitchellh/cli/cli.go | 664 - vendor/github.com/mitchellh/cli/command.go | 67 - .../github.com/mitchellh/cli/command_mock.go | 63 - vendor/github.com/mitchellh/cli/help.go | 79 - vendor/github.com/mitchellh/cli/ui.go | 187 - vendor/github.com/mitchellh/cli/ui_colored.go | 69 - .../github.com/mitchellh/cli/ui_concurrent.go | 54 - vendor/github.com/mitchellh/cli/ui_mock.go | 111 - vendor/github.com/mitchellh/cli/ui_writer.go | 18 - .../mitchellh/copystructure/LICENSE | 21 - .../mitchellh/copystructure/README.md | 21 - .../mitchellh/copystructure/copier_time.go | 15 - .../mitchellh/copystructure/copystructure.go | 548 - .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - .../mitchellh/go-homedir/homedir.go | 137 - .../mitchellh/go-testing-interface/LICENSE | 21 - .../mitchellh/go-testing-interface/README.md | 52 - .../mitchellh/go-testing-interface/testing.go | 70 - .../mitchellh/hashstructure/LICENSE | 21 - .../mitchellh/hashstructure/README.md | 65 - .../mitchellh/hashstructure/hashstructure.go | 
358 - .../mitchellh/hashstructure/include.go | 15 - .../github.com/mitchellh/mapstructure/LICENSE | 21 - .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 152 - .../mitchellh/mapstructure/error.go | 50 - .../mitchellh/mapstructure/mapstructure.go | 828 - .../github.com/mitchellh/reflectwalk/LICENSE | 21 - .../mitchellh/reflectwalk/README.md | 6 - .../mitchellh/reflectwalk/location.go | 19 - .../mitchellh/reflectwalk/location_string.go | 16 - .../mitchellh/reflectwalk/reflectwalk.go | 401 - vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 - .../github.com/posener/complete/LICENSE.txt | 21 - vendor/github.com/posener/complete/args.go | 75 - vendor/github.com/posener/complete/cmd/cmd.go | 128 - .../posener/complete/cmd/install/bash.go | 32 - .../posener/complete/cmd/install/install.go | 92 - .../posener/complete/cmd/install/utils.go | 118 - .../posener/complete/cmd/install/zsh.go | 39 - vendor/github.com/posener/complete/command.go | 118 - .../github.com/posener/complete/complete.go | 86 - vendor/github.com/posener/complete/log.go | 23 - .../github.com/posener/complete/match/file.go | 19 - .../posener/complete/match/match.go | 6 - .../posener/complete/match/prefix.go | 9 - .../posener/complete/metalinter.json | 21 - vendor/github.com/posener/complete/predict.go | 41 - .../posener/complete/predict_files.go | 108 - .../posener/complete/predict_set.go | 19 - vendor/github.com/posener/complete/readme.md | 116 - vendor/github.com/posener/complete/test.sh | 12 - vendor/github.com/posener/complete/utils.go | 46 - vendor/github.com/satori/go.uuid/LICENSE | 20 - vendor/github.com/satori/go.uuid/README.md | 65 - vendor/github.com/satori/go.uuid/uuid.go | 481 - vendor/github.com/shiena/ansicolor/LICENSE | 21 - vendor/github.com/shiena/ansicolor/README.md | 101 - .../github.com/shiena/ansicolor/ansicolor.go | 42 - .../shiena/ansicolor/ansicolor_ansi.go | 18 - .../shiena/ansicolor/ansicolor_windows.go | 417 - .../github.com/sky-uk/gonsx/CONTRIBUTING.md | 72 - vendor/github.com/sky-uk/gonsx/Dockerfile | 37 - vendor/github.com/sky-uk/gonsx/Jenkinsfile | 147 - vendor/github.com/sky-uk/gonsx/LICENSE | 29 - vendor/github.com/sky-uk/gonsx/Makefile | 70 - vendor/github.com/sky-uk/gonsx/README.md | 451 - vendor/github.com/sky-uk/gonsx/VERSION | 1 - .../github.com/sky-uk/gonsx/api/base_api.go | 73 - .../gonsx/api/dhcprelay/create_dhcprelay.go | 24 - .../gonsx/api/dhcprelay/delete_dhcprelay.go | 18 - .../gonsx/api/dhcprelay/dhcprelay_types.go | 24 - .../dhcprelay/dhcprelay_types_functions.go | 56 - .../gonsx/api/dhcprelay/get_all_dhcprelay.go | 23 - .../gonsx/api/dhcprelay/update_dhcprelay.go | 24 - .../api/edgeinterface/create_edgeinterface.go | 25 - .../api/edgeinterface/delete_edgeinterface.go | 19 - .../api/edgeinterface/edgeinterface_types.go | 34 - .../edgeinterface_types_functions.go | 24 - .../edgeinterface/get_all_edgeinterfaces.go | 23 - .../api/edgeinterface/get_edgeinterface.go | 24 - .../api/edgeinterface/update_edgeinterface.go | 19 - .../create_firewallexclusion.go | 23 - .../delete_firewallexclusion.go | 23 - .../firewallexclusion_types.go | 16 - .../firewallexclusion_types_functions.go | 35 - .../get_all_firewallexclusion.go | 23 - vendor/github.com/sky-uk/gonsx/api/nsx_api.go | 16 - .../api/securitygroup/create_securitygroup.go | 27 - .../api/securitygroup/delete_securitygroup.go | 18 - .../securitygroup/get_all_securitygroup.go | 23 - .../api/securitygroup/securitygroup_types.go | 41 - 
.../securitygroup_types_functions.go | 65 - .../api/securitygroup/update_securitygroup.go | 24 - .../securitypolicy/create_securitypolicy.go | 36 - .../securitypolicy/delete_securitypolicy.go | 23 - .../securitypolicy/get_all_securitypolicy.go | 23 - .../api/securitypolicy/get_securitypolicy.go | 23 - .../securitypolicy/securitypolicy_types.go | 69 - .../securitypolicy_types_functions.go | 218 - .../securitypolicy/update_securitypolicy.go | 24 - .../api/securitytag/assign_securitytag.go | 18 - .../api/securitytag/create_securitytag.go | 28 - .../api/securitytag/delete_securitytag.go | 18 - .../api/securitytag/detach_securitytag.go | 18 - .../get_all_attached_securitytags.go | 23 - .../api/securitytag/get_all_securitytags.go | 23 - .../get_all_securitytags_attached_to_vm.go | 23 - .../api/securitytag/securitytags_types.go | 41 - .../securitytags_types_functions.go | 72 - .../update_attached_securitytags.go | 24 - .../api/securitytag/update_securitytag.go | 23 - .../gonsx/api/service/create_service.go | 30 - .../gonsx/api/service/delete_service.go | 18 - .../gonsx/api/service/get_all_service.go | 23 - .../sky-uk/gonsx/api/service/service_types.go | 27 - .../api/service/service_types_functions.go | 35 - .../gonsx/api/service/update_service.go | 24 - .../sky-uk/gonsx/api/tzone/get_all_tzone.go | 23 - .../sky-uk/gonsx/api/tzone/get_tzone.go | 23 - .../sky-uk/gonsx/api/tzone/tzone_types.go | 12 - .../gonsx/api/tzone/tzone_types_functions.go | 23 - .../api/virtualwire/create_virtualwire.go | 23 - .../api/virtualwire/delete_virtualwire.go | 18 - .../api/virtualwire/get_all_virtualwire.go | 23 - .../gonsx/api/virtualwire/get_virtualwire.go | 23 - .../api/virtualwire/update_virtualwire.go | 18 - .../api/virtualwire/virtualwire_types.go | 44 - .../virtualwire_types_functions.go | 23 - vendor/github.com/sky-uk/gonsx/client.go | 110 - .../sky-uk/gonsx/docker-compose.yml | 8 - .../github.com/stretchr/testify/LICENCE.txt | 22 - vendor/github.com/stretchr/testify/LICENSE | 22 - .../testify/assert/assertion_forward.go | 387 - .../testify/assert/assertion_forward.go.tmpl | 4 - .../stretchr/testify/assert/assertions.go | 1007 - .../github.com/stretchr/testify/assert/doc.go | 45 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 106 - vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/bcrypt/base64.go | 35 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 - vendor/golang.org/x/crypto/blowfish/block.go | 159 - vendor/golang.org/x/crypto/blowfish/cipher.go | 91 - vendor/golang.org/x/crypto/blowfish/const.go | 199 - vendor/golang.org/x/crypto/cast5/cast5.go | 526 - .../x/crypto/curve25519/const_amd64.h | 8 - .../x/crypto/curve25519/const_amd64.s | 20 - .../x/crypto/curve25519/cswap_amd64.s | 65 - .../x/crypto/curve25519/curve25519.go | 834 - vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../x/crypto/curve25519/freeze_amd64.s | 73 - .../x/crypto/curve25519/ladderstep_amd64.s | 1377 -- .../x/crypto/curve25519/mont25519_amd64.go | 240 - .../x/crypto/curve25519/mul_amd64.s | 169 - .../x/crypto/curve25519/square_amd64.s | 132 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 181 - .../ed25519/internal/edwards25519/const.go | 1422 -- .../internal/edwards25519/edwards25519.go | 1771 -- .../x/crypto/openpgp/armor/armor.go | 219 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/elgamal/elgamal.go | 122 - 
.../x/crypto/openpgp/errors/errors.go | 72 - vendor/golang.org/x/crypto/openpgp/keys.go | 637 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 199 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/packet.go | 537 - .../x/crypto/openpgp/packet/private_key.go | 380 - .../x/crypto/openpgp/packet/public_key.go | 748 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../x/crypto/openpgp/packet/userid.go | 160 - vendor/golang.org/x/crypto/openpgp/read.go | 442 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 - vendor/golang.org/x/crypto/openpgp/write.go | 378 - vendor/golang.org/x/crypto/ssh/buffer.go | 98 - vendor/golang.org/x/crypto/ssh/certs.go | 519 - vendor/golang.org/x/crypto/ssh/channel.go | 633 - vendor/golang.org/x/crypto/ssh/cipher.go | 627 - vendor/golang.org/x/crypto/ssh/client.go | 257 - vendor/golang.org/x/crypto/ssh/client_auth.go | 486 - vendor/golang.org/x/crypto/ssh/common.go | 373 - vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 21 - vendor/golang.org/x/crypto/ssh/handshake.go | 640 - vendor/golang.org/x/crypto/ssh/kex.go | 540 - vendor/golang.org/x/crypto/ssh/keys.go | 1006 - vendor/golang.org/x/crypto/ssh/mac.go | 61 - vendor/golang.org/x/crypto/ssh/messages.go | 758 - vendor/golang.org/x/crypto/ssh/mux.go | 330 - vendor/golang.org/x/crypto/ssh/server.go | 563 - vendor/golang.org/x/crypto/ssh/session.go | 627 - vendor/golang.org/x/crypto/ssh/streamlocal.go | 115 - vendor/golang.org/x/crypto/ssh/tcpip.go | 465 - vendor/golang.org/x/crypto/ssh/transport.go | 375 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/context/context.go | 54 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 - vendor/golang.org/x/net/context/pre_go19.go | 109 - vendor/golang.org/x/net/html/atom/atom.go | 78 - vendor/golang.org/x/net/html/atom/table.go | 713 - vendor/golang.org/x/net/html/const.go | 102 - vendor/golang.org/x/net/html/doc.go | 106 - vendor/golang.org/x/net/html/doctype.go | 156 - vendor/golang.org/x/net/html/entity.go | 2253 -- vendor/golang.org/x/net/html/escape.go | 258 - vendor/golang.org/x/net/html/foreign.go | 226 - vendor/golang.org/x/net/html/node.go | 193 - vendor/golang.org/x/net/html/parse.go | 2094 -- vendor/golang.org/x/net/html/render.go | 271 - vendor/golang.org/x/net/html/token.go | 1219 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 - .../x/net/http2/client_conn_pool.go | 256 - .../x/net/http2/configure_transport.go | 80 - vendor/golang.org/x/net/http2/databuffer.go | 146 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/flow.go | 50 - vendor/golang.org/x/net/http2/frame.go | 1579 -- vendor/golang.org/x/net/http2/go16.go | 16 - 
vendor/golang.org/x/net/http2/go17.go | 106 - vendor/golang.org/x/net/http2/go17_not18.go | 36 - vendor/golang.org/x/net/http2/go18.go | 56 - vendor/golang.org/x/net/http2/go19.go | 16 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 78 - vendor/golang.org/x/net/http2/hpack/encode.go | 240 - vendor/golang.org/x/net/http2/hpack/hpack.go | 490 - .../golang.org/x/net/http2/hpack/huffman.go | 212 - vendor/golang.org/x/net/http2/hpack/tables.go | 479 - vendor/golang.org/x/net/http2/http2.go | 391 - vendor/golang.org/x/net/http2/not_go16.go | 21 - vendor/golang.org/x/net/http2/not_go17.go | 87 - vendor/golang.org/x/net/http2/not_go18.go | 29 - vendor/golang.org/x/net/http2/not_go19.go | 16 - vendor/golang.org/x/net/http2/pipe.go | 163 - vendor/golang.org/x/net/http2/server.go | 2857 --- vendor/golang.org/x/net/http2/transport.go | 2229 -- vendor/golang.org/x/net/http2/write.go | 370 - vendor/golang.org/x/net/http2/writesched.go | 242 - .../x/net/http2/writesched_priority.go | 452 - .../x/net/http2/writesched_random.go | 72 - vendor/golang.org/x/net/idna/idna.go | 680 - vendor/golang.org/x/net/idna/punycode.go | 203 - vendor/golang.org/x/net/idna/tables.go | 4477 ---- vendor/golang.org/x/net/idna/trie.go | 72 - vendor/golang.org/x/net/idna/trieval.go | 114 - .../x/net/internal/timeseries/timeseries.go | 525 - .../golang.org/x/net/lex/httplex/httplex.go | 351 - vendor/golang.org/x/net/trace/events.go | 532 - vendor/golang.org/x/net/trace/histogram.go | 365 - vendor/golang.org/x/net/trace/trace.go | 1082 - vendor/golang.org/x/net/trace/trace_go16.go | 21 - vendor/golang.org/x/net/trace/trace_go17.go | 21 - vendor/golang.org/x/sys/LICENSE | 27 - vendor/golang.org/x/sys/PATENTS | 22 - vendor/golang.org/x/sys/unix/README.md | 173 - vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 - .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../x/sys/unix/asm_dragonfly_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 35 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 29 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 24 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 28 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 31 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 28 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 28 - vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 - .../golang.org/x/sys/unix/bluetooth_linux.go | 35 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 - vendor/golang.org/x/sys/unix/constants.go | 13 - vendor/golang.org/x/sys/unix/dev_linux.go | 42 - vendor/golang.org/x/sys/unix/dirent.go | 102 - vendor/golang.org/x/sys/unix/endian_big.go | 9 - vendor/golang.org/x/sys/unix/endian_little.go | 9 - vendor/golang.org/x/sys/unix/env_unix.go | 27 - vendor/golang.org/x/sys/unix/env_unset.go | 14 - .../x/sys/unix/errors_freebsd_386.go | 227 - .../x/sys/unix/errors_freebsd_amd64.go | 227 - 
.../x/sys/unix/errors_freebsd_arm.go | 226 - vendor/golang.org/x/sys/unix/file_unix.go | 27 - vendor/golang.org/x/sys/unix/flock.go | 22 - .../x/sys/unix/flock_linux_32bit.go | 13 - vendor/golang.org/x/sys/unix/gccgo.go | 46 - vendor/golang.org/x/sys/unix/gccgo_c.c | 41 - .../x/sys/unix/gccgo_linux_amd64.go | 20 - .../x/sys/unix/gccgo_linux_sparc64.go | 20 - vendor/golang.org/x/sys/unix/mkall.sh | 197 - vendor/golang.org/x/sys/unix/mkerrors.sh | 559 - vendor/golang.org/x/sys/unix/mksyscall.pl | 328 - .../x/sys/unix/mksyscall_solaris.pl | 289 - .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 - .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 - .../x/sys/unix/mksysnum_dragonfly.pl | 50 - .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 50 - .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 - .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 - .../golang.org/x/sys/unix/openbsd_pledge.go | 38 - vendor/golang.org/x/sys/unix/race.go | 30 - vendor/golang.org/x/sys/unix/race0.go | 25 - .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 104 - vendor/golang.org/x/sys/unix/str.go | 26 - vendor/golang.org/x/sys/unix/syscall.go | 69 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 635 - .../golang.org/x/sys/unix/syscall_darwin.go | 536 - .../x/sys/unix/syscall_darwin_386.go | 77 - .../x/sys/unix/syscall_darwin_amd64.go | 77 - .../x/sys/unix/syscall_darwin_arm.go | 71 - .../x/sys/unix/syscall_darwin_arm64.go | 77 - .../x/sys/unix/syscall_dragonfly.go | 415 - .../x/sys/unix/syscall_dragonfly_amd64.go | 61 - .../golang.org/x/sys/unix/syscall_freebsd.go | 708 - .../x/sys/unix/syscall_freebsd_386.go | 61 - .../x/sys/unix/syscall_freebsd_amd64.go | 61 - .../x/sys/unix/syscall_freebsd_arm.go | 61 - vendor/golang.org/x/sys/unix/syscall_linux.go | 1459 -- .../x/sys/unix/syscall_linux_386.go | 399 - .../x/sys/unix/syscall_linux_amd64.go | 152 - .../x/sys/unix/syscall_linux_amd64_gc.go | 13 - .../x/sys/unix/syscall_linux_arm.go | 263 - .../x/sys/unix/syscall_linux_arm64.go | 190 - .../x/sys/unix/syscall_linux_mips64x.go | 209 - .../x/sys/unix/syscall_linux_mipsx.go | 239 - .../x/sys/unix/syscall_linux_ppc64x.go | 135 - .../x/sys/unix/syscall_linux_s390x.go | 328 - .../x/sys/unix/syscall_linux_sparc64.go | 169 - .../golang.org/x/sys/unix/syscall_netbsd.go | 472 - .../x/sys/unix/syscall_netbsd_386.go | 42 - .../x/sys/unix/syscall_netbsd_amd64.go | 42 - .../x/sys/unix/syscall_netbsd_arm.go | 42 - .../golang.org/x/sys/unix/syscall_no_getwd.go | 11 - .../golang.org/x/sys/unix/syscall_openbsd.go | 282 - .../x/sys/unix/syscall_openbsd_386.go | 42 - .../x/sys/unix/syscall_openbsd_amd64.go | 42 - .../x/sys/unix/syscall_openbsd_arm.go | 44 - .../golang.org/x/sys/unix/syscall_solaris.go | 716 - .../x/sys/unix/syscall_solaris_amd64.go | 35 - vendor/golang.org/x/sys/unix/syscall_unix.go | 293 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 - .../x/sys/unix/zerrors_darwin_386.go | 1673 -- .../x/sys/unix/zerrors_darwin_amd64.go | 1673 -- .../x/sys/unix/zerrors_darwin_arm.go | 1673 -- .../x/sys/unix/zerrors_darwin_arm64.go | 1673 -- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1568 -- .../x/sys/unix/zerrors_freebsd_386.go | 1706 -- .../x/sys/unix/zerrors_freebsd_amd64.go | 1707 -- .../x/sys/unix/zerrors_freebsd_arm.go | 1715 -- .../x/sys/unix/zerrors_linux_386.go | 2180 -- .../x/sys/unix/zerrors_linux_amd64.go | 2181 -- .../x/sys/unix/zerrors_linux_arm.go | 2185 -- .../x/sys/unix/zerrors_linux_arm64.go | 2170 -- .../x/sys/unix/zerrors_linux_mips.go | 2189 -- 
.../x/sys/unix/zerrors_linux_mips64.go | 2189 -- .../x/sys/unix/zerrors_linux_mips64le.go | 2189 -- .../x/sys/unix/zerrors_linux_mipsle.go | 2189 -- .../x/sys/unix/zerrors_linux_ppc64.go | 2243 -- .../x/sys/unix/zerrors_linux_ppc64le.go | 2243 -- .../x/sys/unix/zerrors_linux_s390x.go | 2242 -- .../x/sys/unix/zerrors_linux_sparc64.go | 2142 -- .../x/sys/unix/zerrors_netbsd_386.go | 1712 -- .../x/sys/unix/zerrors_netbsd_amd64.go | 1702 -- .../x/sys/unix/zerrors_netbsd_arm.go | 1691 -- .../x/sys/unix/zerrors_openbsd_386.go | 1584 -- .../x/sys/unix/zerrors_openbsd_amd64.go | 1583 -- .../x/sys/unix/zerrors_openbsd_arm.go | 1586 -- .../x/sys/unix/zerrors_solaris_amd64.go | 1483 -- .../x/sys/unix/zsyscall_darwin_386.go | 1609 -- .../x/sys/unix/zsyscall_darwin_amd64.go | 1609 -- .../x/sys/unix/zsyscall_darwin_arm.go | 1609 -- .../x/sys/unix/zsyscall_darwin_arm64.go | 1609 -- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1440 -- .../x/sys/unix/zsyscall_freebsd_386.go | 1877 -- .../x/sys/unix/zsyscall_freebsd_amd64.go | 1877 -- .../x/sys/unix/zsyscall_freebsd_arm.go | 1877 -- .../x/sys/unix/zsyscall_linux_386.go | 1953 -- .../x/sys/unix/zsyscall_linux_amd64.go | 2146 -- .../x/sys/unix/zsyscall_linux_arm.go | 2055 -- .../x/sys/unix/zsyscall_linux_arm64.go | 2029 -- .../x/sys/unix/zsyscall_linux_mips.go | 2111 -- .../x/sys/unix/zsyscall_linux_mips64.go | 2105 -- .../x/sys/unix/zsyscall_linux_mips64le.go | 2105 -- .../x/sys/unix/zsyscall_linux_mipsle.go | 2111 -- .../x/sys/unix/zsyscall_linux_ppc64.go | 2157 -- .../x/sys/unix/zsyscall_linux_ppc64le.go | 2157 -- .../x/sys/unix/zsyscall_linux_s390x.go | 1937 -- .../x/sys/unix/zsyscall_linux_sparc64.go | 1833 -- .../x/sys/unix/zsyscall_netbsd_386.go | 1346 -- .../x/sys/unix/zsyscall_netbsd_amd64.go | 1346 -- .../x/sys/unix/zsyscall_netbsd_arm.go | 1346 -- .../x/sys/unix/zsyscall_openbsd_386.go | 1404 -- .../x/sys/unix/zsyscall_openbsd_amd64.go | 1404 -- .../x/sys/unix/zsyscall_openbsd_arm.go | 1404 -- .../x/sys/unix/zsyscall_solaris_amd64.go | 1600 -- .../golang.org/x/sys/unix/zsysctl_openbsd.go | 270 - .../x/sys/unix/zsysnum_darwin_386.go | 398 - .../x/sys/unix/zsysnum_darwin_amd64.go | 398 - .../x/sys/unix/zsysnum_darwin_arm.go | 426 - .../x/sys/unix/zsysnum_darwin_arm64.go | 426 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 - .../x/sys/unix/zsysnum_freebsd_386.go | 353 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 353 - .../x/sys/unix/zsysnum_freebsd_arm.go | 353 - .../x/sys/unix/zsysnum_linux_386.go | 388 - .../x/sys/unix/zsysnum_linux_amd64.go | 341 - .../x/sys/unix/zsysnum_linux_arm.go | 361 - .../x/sys/unix/zsysnum_linux_arm64.go | 285 - .../x/sys/unix/zsysnum_linux_mips.go | 374 - .../x/sys/unix/zsysnum_linux_mips64.go | 334 - .../x/sys/unix/zsysnum_linux_mips64le.go | 334 - .../x/sys/unix/zsysnum_linux_mipsle.go | 374 - .../x/sys/unix/zsysnum_linux_ppc64.go | 369 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 369 - .../x/sys/unix/zsysnum_linux_s390x.go | 331 - .../x/sys/unix/zsysnum_linux_sparc64.go | 348 - .../x/sys/unix/zsysnum_netbsd_386.go | 274 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 - .../x/sys/unix/zsysnum_netbsd_arm.go | 274 - .../x/sys/unix/zsysnum_openbsd_386.go | 207 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 207 - .../x/sys/unix/zsysnum_openbsd_arm.go | 213 - .../x/sys/unix/zsysnum_solaris_amd64.go | 13 - .../x/sys/unix/ztypes_darwin_386.go | 462 - .../x/sys/unix/ztypes_darwin_amd64.go | 472 - .../x/sys/unix/ztypes_darwin_arm.go | 463 - .../x/sys/unix/ztypes_darwin_arm64.go | 471 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 448 
-
.../x/sys/unix/ztypes_freebsd_386.go | 521 -
.../x/sys/unix/ztypes_freebsd_amd64.go | 524 -
.../x/sys/unix/ztypes_freebsd_arm.go | 524 -
.../golang.org/x/sys/unix/ztypes_linux_386.go | 694 -
.../x/sys/unix/ztypes_linux_amd64.go | 712 -
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 683 -
.../x/sys/unix/ztypes_linux_arm64.go | 691 -
.../x/sys/unix/ztypes_linux_mips.go | 688 -
.../x/sys/unix/ztypes_linux_mips64.go | 693 -
.../x/sys/unix/ztypes_linux_mips64le.go | 693 -
.../x/sys/unix/ztypes_linux_mipsle.go | 688 -
.../x/sys/unix/ztypes_linux_ppc64.go | 701 -
.../x/sys/unix/ztypes_linux_ppc64le.go | 701 -
.../x/sys/unix/ztypes_linux_s390x.go | 718 -
.../x/sys/unix/ztypes_linux_sparc64.go | 666 -
.../x/sys/unix/ztypes_netbsd_386.go | 401 -
.../x/sys/unix/ztypes_netbsd_amd64.go | 408 -
.../x/sys/unix/ztypes_netbsd_arm.go | 406 -
.../x/sys/unix/ztypes_openbsd_386.go | 446 -
.../x/sys/unix/ztypes_openbsd_amd64.go | 453 -
.../x/sys/unix/ztypes_openbsd_arm.go | 439 -
.../x/sys/unix/ztypes_solaris_amd64.go | 442 -
vendor/golang.org/x/text/LICENSE | 27 -
vendor/golang.org/x/text/PATENTS | 22 -
.../x/text/secure/bidirule/bidirule.go | 342 -
.../golang.org/x/text/transform/transform.go | 705 -
vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 -
.../golang.org/x/text/unicode/bidi/bracket.go | 335 -
vendor/golang.org/x/text/unicode/bidi/core.go | 1058 -
vendor/golang.org/x/text/unicode/bidi/prop.go | 206 -
.../golang.org/x/text/unicode/bidi/tables.go | 1779 --
.../golang.org/x/text/unicode/bidi/trieval.go | 60 -
.../x/text/unicode/norm/composition.go | 508 -
.../x/text/unicode/norm/forminfo.go | 259 -
.../golang.org/x/text/unicode/norm/input.go | 109 -
vendor/golang.org/x/text/unicode/norm/iter.go | 457 -
.../x/text/unicode/norm/normalize.go | 609 -
.../x/text/unicode/norm/readwriter.go | 125 -
.../golang.org/x/text/unicode/norm/tables.go | 7631 ------
.../x/text/unicode/norm/transform.go | 88 -
vendor/golang.org/x/text/unicode/norm/trie.go | 54 -
vendor/google.golang.org/genproto/LICENSE | 202 -
.../googleapis/rpc/status/status.pb.go | 143 -
vendor/google.golang.org/grpc/AUTHORS | 1 -
vendor/google.golang.org/grpc/CONTRIBUTING.md | 32 -
vendor/google.golang.org/grpc/LICENSE | 202 -
vendor/google.golang.org/grpc/Makefile | 49 -
vendor/google.golang.org/grpc/README.md | 45 -
vendor/google.golang.org/grpc/backoff.go | 98 -
vendor/google.golang.org/grpc/balancer.go | 408 -
vendor/google.golang.org/grpc/call.go | 309 -
vendor/google.golang.org/grpc/clientconn.go | 1248 -
vendor/google.golang.org/grpc/codec.go | 104 -
vendor/google.golang.org/grpc/codegen.sh | 17 -
.../grpc/codes/code_string.go | 16 -
vendor/google.golang.org/grpc/codes/codes.go | 144 -
.../grpc/connectivity/connectivity.go | 72 -
vendor/google.golang.org/grpc/coverage.sh | 48 -
.../grpc/credentials/credentials.go | 219 -
.../grpc/credentials/credentials_util_go17.go | 60 -
.../grpc/credentials/credentials_util_go18.go | 38 -
.../credentials/credentials_util_pre_go17.go | 57 -
vendor/google.golang.org/grpc/doc.go | 24 -
vendor/google.golang.org/grpc/go16.go | 98 -
vendor/google.golang.org/grpc/go17.go | 98 -
vendor/google.golang.org/grpc/grpclb.go | 737 -
.../messages_only/grpc_lb_v1/grpclb.pb.go | 629 -
.../google.golang.org/grpc/grpclog/grpclog.go | 123 -
.../google.golang.org/grpc/grpclog/logger.go | 83 -
.../grpc/grpclog/loggerv2.go | 195 -
.../grpc/health/grpc_health_v1/health.pb.go | 190 -
.../grpc/health/grpc_health_v1/health.proto | 34 -
.../google.golang.org/grpc/health/health.go | 72 -
.../grpc/install-protobuf.sh | 23 -
vendor/google.golang.org/grpc/interceptor.go | 75 -
.../grpc/internal/internal.go | 34 -
.../grpc/keepalive/keepalive.go | 65 -
.../grpc/metadata/metadata.go | 137 -
.../grpc/naming/dns_resolver.go | 290 -
vendor/google.golang.org/grpc/naming/go17.go | 34 -
vendor/google.golang.org/grpc/naming/go18.go | 28 -
.../google.golang.org/grpc/naming/naming.go | 59 -
vendor/google.golang.org/grpc/peer/peer.go | 51 -
vendor/google.golang.org/grpc/proxy.go | 130 -
vendor/google.golang.org/grpc/rpc_util.go | 525 -
vendor/google.golang.org/grpc/server.go | 1160 -
.../google.golang.org/grpc/stats/handlers.go | 64 -
vendor/google.golang.org/grpc/stats/stats.go | 296 -
.../google.golang.org/grpc/status/status.go | 168 -
vendor/google.golang.org/grpc/stream.go | 669 -
vendor/google.golang.org/grpc/tap/tap.go | 51 -
vendor/google.golang.org/grpc/trace.go | 104 -
.../grpc/transport/bdp_estimator.go | 143 -
.../grpc/transport/control.go | 246 -
.../google.golang.org/grpc/transport/go16.go | 45 -
.../google.golang.org/grpc/transport/go17.go | 46 -
.../grpc/transport/handler_server.go | 416 -
.../grpc/transport/http2_client.go | 1370 --
.../grpc/transport/http2_server.go | 1201 -
.../grpc/transport/http_util.go | 618 -
.../google.golang.org/grpc/transport/log.go | 50 -
.../grpc/transport/transport.go | 730 -
vendor/vendor.json | 1026 -
1187 files changed, 515 insertions(+), 356852 deletions(-)
create mode 100644 go.mod
create mode 100644 go.sum
delete mode 100644 vendor/github.com/apparentlymart/go-cidr/LICENSE
delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
delete mode 100644 vendor/github.com/armon/go-radix/LICENSE
delete mode 100644 vendor/github.com/armon/go-radix/README.md
delete mode 100644 vendor/github.com/armon/go-radix/radix.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt
delete mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go
delete mode 100644 vendor/github.com/bgentry/go-netrc/LICENSE
delete mode 100644 vendor/github.com/bgentry/go-netrc/netrc/netrc.go
delete mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE
delete mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
delete mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md
delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go
delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
delete mode 100644 vendor/github.com/blang/semver/LICENSE
delete mode 100644 vendor/github.com/blang/semver/README.md
delete mode 100644 vendor/github.com/blang/semver/json.go
delete mode 100644 vendor/github.com/blang/semver/package.json
delete mode 100644 vendor/github.com/blang/semver/range.go
delete mode 100644 vendor/github.com/blang/semver/semver.go
delete mode 100644 vendor/github.com/blang/semver/sort.go
delete mode 100644 vendor/github.com/blang/semver/sql.go
delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
delete mode 100644 vendor/github.com/go-ini/ini/LICENSE
delete mode 100644 vendor/github.com/go-ini/ini/Makefile
delete mode 100644 vendor/github.com/go-ini/ini/README.md
delete mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
delete mode 100644 vendor/github.com/go-ini/ini/error.go
delete mode 100644 vendor/github.com/go-ini/ini/ini.go
delete mode 100644 vendor/github.com/go-ini/ini/key.go
delete mode 100644 vendor/github.com/go-ini/ini/parser.go
delete mode 100644 vendor/github.com/go-ini/ini/section.go
delete mode 100644 vendor/github.com/go-ini/ini/struct.go
delete mode 100644 vendor/github.com/golang/protobuf/LICENSE
delete mode 100644 vendor/github.com/golang/protobuf/proto/Makefile
delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go
delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
delete mode 100755 vendor/github.com/golang/protobuf/ptypes/regen.sh
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
delete mode 100755 vendor/github.com/gucumber/gucumber/LICENSE.txt
delete mode 100755 vendor/github.com/gucumber/gucumber/gherkin/i18n.go
delete mode 100644 vendor/github.com/gucumber/gucumber/gherkin/parser.go
delete mode 100755 vendor/github.com/gucumber/gucumber/gherkin/types.go
delete mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
delete mode 100644 vendor/github.com/hashicorp/errwrap/README.md
delete mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md
delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-getter/README.md
delete mode 100644 vendor/github.com/hashicorp/go-getter/appveyor.yml
delete mode 100644 vendor/github.com/hashicorp/go-getter/client.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/client_mode.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/copy_dir.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_gzip.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tar.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_testing.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tgz.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/decompress_zip.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/detect.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/detect_file.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/detect_github.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/detect_s3.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/folder_storage.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_file.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_file_unix.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_file_windows.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_git.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_hg.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_http.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_mock.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/get_s3.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/netrc.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/source.go
delete mode 100644 vendor/github.com/hashicorp/go-getter/storage.go
delete mode 100644 vendor/github.com/hashicorp/go-hclog/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-hclog/README.md
delete mode 100644 vendor/github.com/hashicorp/go-hclog/global.go
delete mode 100644 vendor/github.com/hashicorp/go-hclog/int.go
delete mode 100644 vendor/github.com/hashicorp/go-hclog/log.go
delete mode 100644 vendor/github.com/hashicorp/go-hclog/stacktrace.go
delete mode 100644 vendor/github.com/hashicorp/go-hclog/stdlog.go
delete mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
delete mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
delete mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
delete mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
delete mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
delete mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
delete mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-plugin/README.md
delete mode 100644 vendor/github.com/hashicorp/go-plugin/client.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/discover.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/error.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/mux_broker.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/plugin.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/process.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/process_posix.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/process_windows.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_client.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_server.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/server.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/server_mux.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/stream.go
delete mode 100644 vendor/github.com/hashicorp/go-plugin/testing.go
delete mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-uuid/README.md
delete mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go
delete mode 100644 vendor/github.com/hashicorp/go-version/LICENSE
delete mode 100644 vendor/github.com/hashicorp/go-version/README.md
delete mode 100644 vendor/github.com/hashicorp/go-version/constraint.go
delete mode 100644 vendor/github.com/hashicorp/go-version/version.go
delete mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go
delete mode 100644 vendor/github.com/hashicorp/hcl/LICENSE
delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile
delete mode 100644 vendor/github.com/hashicorp/hcl/README.md
delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml
delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go
delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go
delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go
delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go
delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go
delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go
delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go
delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go
delete mode 100644 vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go
delete mode 100644 vendor/github.com/hashicorp/hil/LICENSE
delete mode 100644 vendor/github.com/hashicorp/hil/README.md
delete mode 100644 vendor/github.com/hashicorp/hil/appveyor.yml
delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/ast.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/call.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/conditional.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/index.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/literal.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/output.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/scope.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/stack.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/type_string.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/unknown.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/variable_access.go
delete mode 100644 vendor/github.com/hashicorp/hil/ast/variables_helper.go
delete mode 100644 vendor/github.com/hashicorp/hil/builtins.go
delete mode 100644 vendor/github.com/hashicorp/hil/check_identifier.go
delete mode 100644 vendor/github.com/hashicorp/hil/check_types.go
delete mode 100644 vendor/github.com/hashicorp/hil/convert.go
delete mode 100644 vendor/github.com/hashicorp/hil/eval.go
delete mode 100644 vendor/github.com/hashicorp/hil/eval_type.go
delete mode 100644 vendor/github.com/hashicorp/hil/evaltype_string.go
delete mode 100644 vendor/github.com/hashicorp/hil/parse.go
delete mode 100644 vendor/github.com/hashicorp/hil/parser/binary_op.go
delete mode 100644 vendor/github.com/hashicorp/hil/parser/error.go
delete mode 100644 vendor/github.com/hashicorp/hil/parser/fuzz.go
delete mode 100644 vendor/github.com/hashicorp/hil/parser/parser.go
delete mode 100644 vendor/github.com/hashicorp/hil/scanner/peeker.go
delete mode 100644 vendor/github.com/hashicorp/hil/scanner/scanner.go
delete mode 100644 vendor/github.com/hashicorp/hil/scanner/token.go
delete mode 100644 vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
delete mode 100644 vendor/github.com/hashicorp/hil/transform_fixed.go
delete mode 100644 vendor/github.com/hashicorp/hil/walk.go
delete mode 100644 vendor/github.com/hashicorp/logutils/LICENSE
delete mode 100644 vendor/github.com/hashicorp/logutils/README.md
delete mode 100644 vendor/github.com/hashicorp/logutils/level.go
delete mode 100644 vendor/github.com/hashicorp/terraform/LICENSE
delete mode 100644 vendor/github.com/hashicorp/terraform/config/append.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/config.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_terraform.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_tree.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/import_tree.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/lang.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/loader.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/merge.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/get.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/tree.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/providers.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/raw_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/config/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/dag.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/dot.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/edge.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/graph.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/marshal.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/set.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/tarjan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/dag/walk.go
delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/expand.go
delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/flatten.go
delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/map.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/acctest/random.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/config/decode.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/config/validator.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/id.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/logging.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/transport.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/mutexkv/mutexkv.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/error.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/id.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/map.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/wait.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/README.md
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/backend.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/equal.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/schema.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/set.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/value.go
delete mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go
delete mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/client.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/plugin.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/serve.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/ui_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/ui_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_components.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_import.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/debug.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_error.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_if.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/interpolate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/path.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_address.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/semantics.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_add.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_filter.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_v1.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/test_failure
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_root.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/user_agent.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/util.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/variables.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/version.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/version_required.go
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
delete mode 100644 vendor/github.com/hashicorp/yamux/LICENSE
delete mode 100644 vendor/github.com/hashicorp/yamux/README.md
delete mode 100644 vendor/github.com/hashicorp/yamux/addr.go
delete mode 100644 vendor/github.com/hashicorp/yamux/const.go
delete mode 100644 vendor/github.com/hashicorp/yamux/mux.go
delete mode 100644 vendor/github.com/hashicorp/yamux/session.go
delete mode 100644 vendor/github.com/hashicorp/yamux/spec.md
delete mode 100644 vendor/github.com/hashicorp/yamux/stream.go
delete mode 100644 vendor/github.com/hashicorp/yamux/util.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE
delete mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile
delete mode 100644 vendor/github.com/jmespath/go-jmespath/README.md
delete mode 100644 vendor/github.com/jmespath/go-jmespath/api.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go
delete mode 100644 vendor/github.com/jmespath/go-jmespath/util.go
delete mode 100755 vendor/github.com/lsegal/gucumber/LICENSE.txt
delete mode 100644 vendor/github.com/lsegal/gucumber/Makefile
delete mode 100755 vendor/github.com/lsegal/gucumber/README.md
delete mode 100644 vendor/github.com/lsegal/gucumber/builder.go
delete mode 100644 vendor/github.com/lsegal/gucumber/run_main.go
delete mode 100755 vendor/github.com/lsegal/gucumber/runner.go
delete mode 100644 vendor/github.com/lsegal/gucumber/stepdef.go
delete mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
delete mode 100644 vendor/github.com/mattn/go-isatty/README.md
delete mode 100644 vendor/github.com/mattn/go-isatty/doc.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
delete mode 100644 vendor/github.com/mitchellh/cli/LICENSE
delete mode 100644 vendor/github.com/mitchellh/cli/Makefile
delete mode 100644 vendor/github.com/mitchellh/cli/README.md
delete mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go
delete mode 100644 vendor/github.com/mitchellh/cli/cli.go
delete mode 100644 vendor/github.com/mitchellh/cli/command.go
delete mode 100644 vendor/github.com/mitchellh/cli/command_mock.go
delete mode 100644 vendor/github.com/mitchellh/cli/help.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go
delete mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE
delete mode 100644 vendor/github.com/mitchellh/copystructure/README.md
delete mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go
delete mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go
delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/LICENSE
delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md
delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go
delete mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE
delete mode 100644 vendor/github.com/mitchellh/hashstructure/README.md
delete mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go
delete mode 100644 vendor/github.com/mitchellh/hashstructure/include.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE
delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go
delete mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE
delete mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md
delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go
delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go
delete mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
delete mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE
delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go
delete mode 100644 vendor/github.com/posener/complete/LICENSE.txt
delete mode 100644 vendor/github.com/posener/complete/args.go
delete mode 100644 vendor/github.com/posener/complete/cmd/cmd.go
delete mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go
delete mode 100644 vendor/github.com/posener/complete/cmd/install/install.go
delete mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go
delete mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go
delete mode 100644 vendor/github.com/posener/complete/command.go
delete mode 100644 vendor/github.com/posener/complete/complete.go
delete mode 100644 vendor/github.com/posener/complete/log.go
delete mode 100644 vendor/github.com/posener/complete/match/file.go
delete mode 100644 vendor/github.com/posener/complete/match/match.go
delete mode 100644 vendor/github.com/posener/complete/match/prefix.go
delete mode 100644 vendor/github.com/posener/complete/metalinter.json
delete mode 100644 vendor/github.com/posener/complete/predict.go
delete mode 100644 vendor/github.com/posener/complete/predict_files.go
delete mode 100644 vendor/github.com/posener/complete/predict_set.go
delete mode 100644 vendor/github.com/posener/complete/readme.md
delete mode 100755 vendor/github.com/posener/complete/test.sh
delete mode 100644 vendor/github.com/posener/complete/utils.go
delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE
delete mode 100644 vendor/github.com/satori/go.uuid/README.md
delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go
delete mode 100644 vendor/github.com/shiena/ansicolor/LICENSE
delete mode 100644 vendor/github.com/shiena/ansicolor/README.md
delete mode 100644 vendor/github.com/shiena/ansicolor/ansicolor.go
delete mode 100644 vendor/github.com/shiena/ansicolor/ansicolor_ansi.go
delete mode 100644 vendor/github.com/shiena/ansicolor/ansicolor_windows.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/CONTRIBUTING.md
delete mode 100644 vendor/github.com/sky-uk/gonsx/Dockerfile
delete mode 100644 vendor/github.com/sky-uk/gonsx/Jenkinsfile
delete mode 100644 vendor/github.com/sky-uk/gonsx/LICENSE
delete mode 100644 vendor/github.com/sky-uk/gonsx/Makefile
delete mode 100644 vendor/github.com/sky-uk/gonsx/README.md
delete mode 100644 vendor/github.com/sky-uk/gonsx/VERSION
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/base_api.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/create_dhcprelay.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/delete_dhcprelay.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/get_all_dhcprelay.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/dhcprelay/update_dhcprelay.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/create_edgeinterface.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/delete_edgeinterface.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_all_edgeinterfaces.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_edgeinterface.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/edgeinterface/update_edgeinterface.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/firewallexclusion/create_firewallexclusion.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/firewallexclusion/delete_firewallexclusion.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/firewallexclusion/get_all_firewallexclusion.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/nsx_api.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/create_securitygroup.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/delete_securitygroup.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/get_all_securitygroup.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitygroup/update_securitygroup.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/create_securitypolicy.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/delete_securitypolicy.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_all_securitypolicy.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_securitypolicy.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitypolicy/update_securitypolicy.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/assign_securitytag.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/create_securitytag.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/delete_securitytag.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/detach_securitytag.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_attached_securitytags.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags_attached_to_vm.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/update_attached_securitytags.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/securitytag/update_securitytag.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/create_service.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/delete_service.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/get_all_service.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/service_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/service_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/service/update_service.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/tzone/get_all_tzone.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/tzone/get_tzone.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/create_virtualwire.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/delete_virtualwire.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/get_all_virtualwire.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/get_virtualwire.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/update_virtualwire.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types_functions.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/client.go
delete mode 100644 vendor/github.com/sky-uk/gonsx/docker-compose.yml
delete mode 100644 vendor/github.com/stretchr/testify/LICENCE.txt
delete mode 100644 vendor/github.com/stretchr/testify/LICENSE
delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go
delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
delete mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go
delete mode 100644 vendor/github.com/stretchr/testify/assert/doc.go
delete mode 100644 vendor/github.com/stretchr/testify/assert/errors.go
delete mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go
delete mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go
delete mode 100644 vendor/golang.org/x/crypto/LICENSE
delete mode 100644 vendor/golang.org/x/crypto/PATENTS
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go
delete mode 100644 vendor/golang.org/x/net/LICENSE
delete mode 100644 vendor/golang.org/x/net/PATENTS
delete mode 100644 vendor/golang.org/x/net/context/context.go
delete mode 100644 vendor/golang.org/x/net/context/go17.go
delete mode 100644 vendor/golang.org/x/net/context/go19.go
delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go
delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go
delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go
delete mode 100644 vendor/golang.org/x/net/html/atom/table.go
delete mode 100644 vendor/golang.org/x/net/html/const.go
delete mode 100644 vendor/golang.org/x/net/html/doc.go
delete mode 100644 vendor/golang.org/x/net/html/doctype.go
delete mode 100644 vendor/golang.org/x/net/html/entity.go
delete mode 100644 vendor/golang.org/x/net/html/escape.go
delete mode 100644 vendor/golang.org/x/net/html/foreign.go
delete mode 100644 vendor/golang.org/x/net/html/node.go
delete mode 100644 vendor/golang.org/x/net/html/parse.go
delete mode 100644 vendor/golang.org/x/net/html/render.go
delete mode 100644 vendor/golang.org/x/net/html/token.go
delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile
delete mode 100644 vendor/golang.org/x/net/http2/Makefile
delete mode 100644 vendor/golang.org/x/net/http2/README
delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go
delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go
delete mode 100644 vendor/golang.org/x/net/http2/configure_transport.go
delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go
delete mode 100644 vendor/golang.org/x/net/http2/errors.go
delete mode 100644 vendor/golang.org/x/net/http2/flow.go
delete mode 100644 vendor/golang.org/x/net/http2/frame.go
delete mode 100644 vendor/golang.org/x/net/http2/go16.go
delete mode 100644 vendor/golang.org/x/net/http2/go17.go
delete mode 100644 vendor/golang.org/x/net/http2/go17_not18.go
delete mode 100644 vendor/golang.org/x/net/http2/go18.go
delete mode 100644 vendor/golang.org/x/net/http2/go19.go
delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go
delete mode 100644 vendor/golang.org/x/net/http2/headermap.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go
delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go
delete mode 100644 vendor/golang.org/x/net/http2/http2.go
delete mode 100644 vendor/golang.org/x/net/http2/not_go16.go
delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go
delete mode 100644 vendor/golang.org/x/net/http2/not_go18.go
delete mode 100644 vendor/golang.org/x/net/http2/not_go19.go
delete mode 100644 vendor/golang.org/x/net/http2/pipe.go
delete mode 100644 vendor/golang.org/x/net/http2/server.go
delete mode 100644 vendor/golang.org/x/net/http2/transport.go
delete mode 100644 vendor/golang.org/x/net/http2/write.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go
delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go
delete mode 100644 vendor/golang.org/x/net/idna/idna.go
delete mode 100644 vendor/golang.org/x/net/idna/punycode.go
delete mode 100644 vendor/golang.org/x/net/idna/tables.go
delete mode 100644 vendor/golang.org/x/net/idna/trie.go
delete mode 100644 vendor/golang.org/x/net/idna/trieval.go
delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go
delete mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go
delete mode 100644 vendor/golang.org/x/net/trace/events.go
delete mode 100644 vendor/golang.org/x/net/trace/histogram.go
delete mode 100644 vendor/golang.org/x/net/trace/trace.go
delete mode 100644 vendor/golang.org/x/net/trace/trace_go16.go
delete mode 100644 vendor/golang.org/x/net/trace/trace_go17.go
delete mode 100644 vendor/golang.org/x/sys/LICENSE
delete mode 100644 vendor/golang.org/x/sys/PATENTS
delete mode 100644 vendor/golang.org/x/sys/unix/README.md
delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
delete mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
delete mode 100644 
vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/constants.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/dirent.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_big.go delete mode 100644 vendor/golang.org/x/sys/unix/endian_little.go delete mode 100644 vendor/golang.org/x/sys/unix/env_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/env_unset.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/file_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/flock.go delete mode 100644 vendor/golang.org/x/sys/unix/flock_linux_32bit.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go delete mode 100755 vendor/golang.org/x/sys/unix/mkall.sh delete mode 100755 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl delete mode 100644 vendor/golang.org/x/sys/unix/openbsd_pledge.go delete mode 100644 vendor/golang.org/x/sys/unix/race.go delete mode 100644 vendor/golang.org/x/sys/unix/race0.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/str.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_no_getwd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go delete mode 100644 
vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go delete mode 100644 vendor/golang.org/x/text/LICENSE delete mode 100644 vendor/golang.org/x/text/PATENTS delete mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go delete mode 100644 vendor/golang.org/x/text/transform/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/tables.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/Makefile delete mode 100644 
vendor/google.golang.org/grpc/README.md delete mode 100644 vendor/google.golang.org/grpc/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go delete mode 100644 vendor/google.golang.org/grpc/call.go delete mode 100644 vendor/google.golang.org/grpc/clientconn.go delete mode 100644 vendor/google.golang.org/grpc/codec.go delete mode 100755 vendor/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100755 vendor/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go18.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go delete mode 100644 vendor/google.golang.org/grpc/doc.go delete mode 100644 vendor/google.golang.org/grpc/go16.go delete mode 100644 vendor/google.golang.org/grpc/go17.go delete mode 100644 vendor/google.golang.org/grpc/grpclb.go delete mode 100644 vendor/google.golang.org/grpc/grpclb/messages_only/grpc_lb_v1/grpclb.pb.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto delete mode 100644 vendor/google.golang.org/grpc/health/health.go delete mode 100755 vendor/google.golang.org/grpc/install-protobuf.sh delete mode 100644 vendor/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go delete mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/go17.go delete mode 100644 vendor/google.golang.org/grpc/naming/go18.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/proxy.go delete mode 100644 vendor/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/google.golang.org/grpc/server.go delete mode 100644 vendor/google.golang.org/grpc/stats/handlers.go delete mode 100644 vendor/google.golang.org/grpc/stats/stats.go delete mode 100644 vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/grpc/stream.go delete mode 100644 vendor/google.golang.org/grpc/tap/tap.go delete mode 100644 vendor/google.golang.org/grpc/trace.go delete mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/transport/control.go delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go delete mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go delete mode 100644 vendor/google.golang.org/grpc/transport/http2_client.go delete mode 100644 vendor/google.golang.org/grpc/transport/http2_server.go delete mode 100644 
delete mode 100644 vendor/google.golang.org/grpc/transport/http_util.go
delete mode 100644 vendor/google.golang.org/grpc/transport/log.go
delete mode 100644 vendor/google.golang.org/grpc/transport/transport.go
delete mode 100644 vendor/vendor.json

diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..86ddab9
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,51 @@
+module github.com/sky-uk/terraform-provider-nsx
+
+go 1.12
+
+require (
+	github.com/apparentlymart/go-cidr v1.0.0
+	github.com/armon/go-radix v1.0.0
+	github.com/aws/aws-sdk-go v1.19.18
+	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
+	github.com/bgentry/speakeasy v0.1.0
+	github.com/blang/semver v3.5.1+incompatible
+	github.com/davecgh/go-spew v1.1.1
+	github.com/go-ini/ini v1.28.1
+	github.com/golang/protobuf v1.3.0
+	github.com/gucumber/gucumber v0.0.0-20160715015914-71608e2f6e76
+	github.com/hashicorp/errwrap v1.0.0
+	github.com/hashicorp/go-cleanhttp v0.5.0
+	github.com/hashicorp/go-getter v1.3.0
+	github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f
+	github.com/hashicorp/go-multierror v1.0.0
+	github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb
+	github.com/hashicorp/go-uuid v1.0.1
+	github.com/hashicorp/go-version v1.1.0
+	github.com/hashicorp/hcl v0.0.0-20170509225359-392dba7d905e
+	github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590
+	github.com/hashicorp/logutils v1.0.0
+	github.com/hashicorp/terraform v0.12.1
+	github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
+	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
+	github.com/lsegal/gucumber v0.0.0-20160715015914-71608e2f6e76
+	github.com/mattn/go-isatty v0.0.5
+	github.com/mitchellh/cli v1.0.0
+	github.com/mitchellh/copystructure v1.0.0
+	github.com/mitchellh/go-homedir v1.0.0
+	github.com/mitchellh/go-testing-interface v1.0.0
+	github.com/mitchellh/hashstructure v1.0.0
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/mitchellh/reflectwalk v1.0.0
+	github.com/pmezard/go-difflib v1.0.0
+	github.com/posener/complete v1.2.1
+	github.com/satori/go.uuid v1.2.0
+	github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644
+	github.com/sky-uk/gonsx v0.0.0-20180122153724-c3caef9aee9b
+	github.com/stretchr/testify v1.3.0
+	golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734
+	golang.org/x/net v0.0.0-20190502183928-7f726cade0ab
+	golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82
+	golang.org/x/text v0.3.2
+	google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922
+	google.golang.org/grpc v1.18.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..f0be490
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,464 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
+cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/ArthurHlt/gonsx v0.0.0-fix-proxy h1:xFjO3KOSnqeB3GGmcmCJsV0p681IVX6kDozSabxw+As=
+github.com/ArthurHlt/gonsx v0.0.0-fix-proxy/go.mod h1:67OkTkXtXC50mtTtEfL1cw4k8Xo2M/gIanbmV5sDixI=
+github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
+github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
+github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
+github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
+github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
+github.com/apparentlymart/go-cidr v0.0.0-20170616213631-2bd8b58cf427 h1:2P/DTyNDU+7qJOB6E5KeIpdc3qcT9IYjyA8hZ9HGz50=
+github.com/apparentlymart/go-cidr v0.0.0-20170616213631-2bd8b58cf427/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
+github.com/apparentlymart/go-cidr v1.0.0 h1:lGDvXx8Lv9QHjrAVP7jyzleG4F9+FkRhJcEsDFxeb8w=
+github.com/apparentlymart/go-cidr v1.0.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
+github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
+github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
+github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20170727155443-1fca145dffbc h1:/WQ8Tr5zbclKWAtvafIcAk/njNpW3gtd22TLLouv+6Q=
+github.com/armon/go-radix v0.0.0-20170727155443-1fca145dffbc/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go v1.1.23 h1:tbqe8vIinMVLIdZsx9XYpex3dHs+vT6ZPFcgnw/fXU8=
+github.com/aws/aws-sdk-go v1.1.23/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
+github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
+github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.19.18 h1:Hb3+b9HCqrOrbAtFstUWg7H5TQ+/EcklJtE8VShVs8o=
+github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k=
+github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
+github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-ini/ini v1.28.1 h1:31infb1d2q1XtBq7msGogTnG0vODN/5poMiHg+cpXQw=
+github.com/go-ini/ini v1.28.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/protobuf v0.0.0-20170816001514-ab9f9a6dab16 h1:aJ1UpMAmhmRgprioyAmDMEDe5cVKlbWs3HDUZw9rT4E=
+github.com/golang/protobuf v0.0.0-20170816001514-ab9f9a6dab16/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
+github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/gucumber/gucumber v0.0.0-20160715015914-71608e2f6e76/go.mod h1:YbdHRK9ViqwGMS0rtRY+1I6faHvVyyurKPIPwifihxI=
+github.com/hashicorp/aws-sdk-go-base v0.2.0/go.mod h1:ZIWACGGi0N7a4DZbf15yuE1JQORmWLtBcVM6F5SXNFU=
+github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA=
+github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
+github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7 h1:67fHcS+inUoiIqWCKIqeDuq2AlPHNHPiTqp97LdQ+bc=
+github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-getter v0.0.0-20170504171234-90b6568eac83 h1:3Kp9VpeMjZAgxK/kGw6uRkT5jl0rKTK9Qg97ZhrHwjk=
+github.com/hashicorp/go-getter v0.0.0-20170504171234-90b6568eac83/go.mod h1:6rdJFnhkXnzGOJbvkrdv4t9nLwKcVA+tmbQeUlkIzrU=
+github.com/hashicorp/go-getter v1.3.0 h1:pFMSFlI9l5NaeuzkpE3L7BYk9qQ9juTAgXW/H0cqxcU=
+github.com/hashicorp/go-getter v1.3.0/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ=
+github.com/hashicorp/go-hclog v0.0.0-20170716174523-b4e5765d1e5f h1:5onjUM14Pu2IrXp+iFQJYxswZoCn8PmSvVQ6IOic8uE=
+github.com/hashicorp/go-hclog v0.0.0-20170716174523-b4e5765d1e5f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho=
+github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
+github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20170622060955-83588e72410a h1:RUacJnONqfKgDeok3I3IqMa8e5+B3qzBIbNK4dZK65k=
+github.com/hashicorp/go-multierror v0.0.0-20170622060955-83588e72410a/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-plugin v0.0.0-20170816151819-a5174f84d7f8 h1:1M/vS848XLVjNyE7waQYdRErztf5MU33jo5IPBfzLag=
+github.com/hashicorp/go-plugin v0.0.0-20170816151819-a5174f84d7f8/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
+github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb h1:Zg2pmmk0lrLFL85lQGt08bOUBpIBaVs6/psiAyx0c4w=
+github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-slug v0.3.0/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8=
+github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-tfe v0.3.16/go.mod h1:SuPHR+OcxvzBZNye7nGPfwZTEyd3rWPfLVbCgyZPezM=
+github.com/hashicorp/go-uuid v0.0.0-20160717022140-64130c7a86d7 h1:w8czuEDeFZo7CdZ2APLO24tTbjzO4cNXTfCkBdtU+wg=
+github.com/hashicorp/go-uuid v0.0.0-20160717022140-64130c7a86d7/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v0.0.0-20170202080759-03c5bf6be031 h1:c3Xdf5fTpk+hqhxqCO+ymqjfUXV9+GZqNgTtlnVzDos=
+github.com/hashicorp/go-version v0.0.0-20170202080759-03c5bf6be031/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
+github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
+github.com/hashicorp/hcl v0.0.0-20170509225359-392dba7d905e h1:KJWs1uTCkN3E/J5ofCH9Pf8KKsibTFc3fv0CA9+WsVo=
+github.com/hashicorp/hcl v0.0.0-20170509225359-392dba7d905e/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
+github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
+github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef h1:xZRvbcwHY8zhaxDwgkmpAp2emwZkVn7p3gat0zhq2X0=
+github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef/go.mod h1:4oI94iqF3GB10QScn46WqbG0kgTUpha97SAzzg2+2ec=
+github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250 h1:fooK5IvDL/KIsi4LxF/JH68nVdrBSiGNPhS2JAQjtjo=
+github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
+github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI=
+github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
+github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE=
+github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
+github.com/hashicorp/terraform v0.10.2 h1:JEQzSu7Sg9/84u6XA7pRMLbYuLguwnmMcdyRJHD9Qt8=
+github.com/hashicorp/terraform v0.10.2/go.mod h1:uN1KUiT7Wdg61fPwsGXQwK3c8PmpIVZrt5Vcb1VrSoM=
+github.com/hashicorp/terraform v0.12.1 h1:A56pUiGbDZ372cvDKWixzVciTGZkqZ01hH58GTNQ4lg=
+github.com/hashicorp/terraform v0.12.1/go.mod h1:T3xp83QKah46oRMg37OPBbEVjEwokcGdn7+aNQtQbKo=
+github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70/go.mod h1:ItvqtvbC3K23FFET62ZwnkwtpbKZm8t8eMcWjmVVjD8=
+github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
+github.com/hashicorp/yamux v0.0.0-20151129044643-df949784da9e h1:R35YxgX/iWqsDjzoNO/7uFf8vjcgBIc02rvaV/Vr15c=
+github.com/hashicorp/yamux v0.0.0-20151129044643-df949784da9e/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lsegal/gucumber v0.0.0-20160715015914-71608e2f6e76/go.mod h1:iavQQ1h5e4AcHzU3cx6tMnfjeEMe/cSSQwTyDKtJYjA=
+github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
+github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-isatty v0.0.2 h1:F+DnWktyadxnOrohKLNUC9/GjFii5RJgY4GFG6ilggw=
+github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v0.0.0-20170824190209-0ce7cd515f64 h1:Gf2TW4lDbrkrJLzvZgVEzrVKCjdhMPvucSrJvgmCT8Q=
+github.com/mitchellh/cli v0.0.0-20170824190209-0ce7cd515f64/go.mod h1:oGumspjLm2kTyiT1QMGpFqRlmxnKHfCvhZEVnx+5UeE=
+github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
+github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3 h1:dECZqiJYhKdj9QlLpiQaRDXHDXRTdiyZI3owdDGhlYY=
+github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY=
+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 h1:eQox4Rh4ewJF+mqYPxCkmBAirRnPaHEB26UkNuPyjlk=
+github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM=
+github.com/mitchellh/go-testing-interface v0.0.0-20170430140722-477c2d05a845 h1:PylPolHRM66kPXPWfSBPw+wsvj/46cN9eJESpM4R3yc=
+github.com/mitchellh/go-testing-interface v0.0.0-20170430140722-477c2d05a845/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU=
+github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
+github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
+github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
+github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992 h1:W7VHAEVflA5/eTyRvQ53Lz5j8bhRd1myHZlI/IZFvbU=
+github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I=
+github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
+github.com/mitchellh/reflectwalk v0.0.0-20170508173806-8d802ff4ae93 h1:1100/b1WEWkub7eZ9uDlBMb/k5hrDR6iHDGOQqfrVXE=
+github.com/mitchellh/reflectwalk v0.0.0-20170508173806-8d802ff4ae93/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
+github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v0.0.0-20170825064415-2100d1b06c06 h1:X8C6oeLHSCGVsws37WAd4yqGpi+MhPXjxUn02xrLlE4=
+github.com/posener/complete v0.0.0-20170825064415-2100d1b06c06/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
+github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/satori/go.uuid v0.0.0-20170321230731-5bf94b69c6b6 h1:7PFDlUz5/yGp8w21PuZGgEfN7ZF7OXvgAIDjwih82Gs=
+github.com/satori/go.uuid v0.0.0-20170321230731-5bf94b69c6b6/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
+github.com/sky-uk/gonsx v0.0.0-20180122153724-c3caef9aee9b/go.mod h1:TZYud7GPIhCudvIHK6WxeGeawsoBQhl0ajiJ+irwS2Q=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
+github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20160615092844-d77da356e56a/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
+github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU=
+github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/zclconf/go-cty v0.0.0-20181129180422-88fbe721e0f8/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+github.com/zclconf/go-cty v0.0.0-20190426224007-b18a157db9e2/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec h1:MSeYjmyjucsFbecMTxg63ASg23lcSARP/kr9sClTFfk=
+github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20170818135134-eb71ad9bd329 h1:1HbsYqDgQeTUBGT5OO+Chqw1TqtjU0tc2BRrEWx85AI=
+golang.org/x/crypto v0.0.0-20170818135134-eb71ad9bd329/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20170824174428-57efc9c3d9f9 h1:5dWUD2H++BOa8+DJzqUYOpTtpAWE6OZZJWhf1BgJR48=
+golang.org/x/net v0.0.0-20170824174428-57efc9c3d9f9/go.mod
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9 h1:pfyU+l9dEu0vZzDDMsdAKa1gZbJYEn6urYXj/+Xkz7s= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170821093353-07c182904dbd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170825072521-ac87088df8ef h1:MsmiU90vB2BugQDlhKHsmsI1ncYbpxi+UJJs8ooGPWY= +golang.org/x/text v0.0.0-20170825072521-ac87088df8ef/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0 h1:ZvI3lsq5AIkr7axxmT3tfwFlJVRFLqe6Fp0W03+MJ38= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc 
v0.0.0-20170824170843-7db1564ba122 h1:LihRvEbXPizeTxBzMCcYnTrcKsWl8vo5wnfMPtzDrWM= +google.golang.org/grpc v0.0.0-20170824170843-7db1564ba122/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 2125378..0000000 --- a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
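The hunks that follow remove the vendored copy of `github.com/apparentlymart/go-cidr`; under Go modules the package is downloaded and checksum-verified instead of being committed to the repository, and its import path is unchanged. As a quick orientation to the API whose source is deleted below, a minimal usage sketch assembled from the doc examples in that source (the expected outputs come from those comments):

```go
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	// The parent range used in the package's own doc examples.
	_, base, err := net.ParseCIDR("10.3.0.0/16")
	if err != nil {
		panic(err)
	}

	// Extend the /16 by 8 prefix bits and take network number 5.
	subnet, err := cidr.Subnet(base, 8, 5)
	if err != nil {
		panic(err)
	}

	// Host number 2 within the parent range.
	host, err := cidr.Host(base, 2)
	if err != nil {
		panic(err)
	}

	fmt.Println(subnet, host) // 10.3.5.0/24 10.3.0.2
}
```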
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index 7534473..0000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24. -func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - newPrefixLen := parentLen + newBits - - if newPrefixLen > addrLen { - return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) - } - - maxNetNum := uint64(1<<uint64(newBits)) - 1 - if uint64(num) > maxNetNum { - return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) - } - - return &net.IPNet{ - IP: insertNumIntoIP(ip, num, newPrefixLen), - Mask: net.CIDRMask(newPrefixLen, addrLen), - }, nil -} - -// Host takes a parent CIDR range and turns it into a host IP address with -// the given host number. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func Host(base *net.IPNet, num int) (net.IP, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - hostLen := addrLen - parentLen - - maxHostNum := uint64(1<<uint64(hostLen)) - 1 - if uint64(num) > maxHostNum { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) - } - var bitlength int - if ip.To4() != nil { - bitlength = 32 - } else { - bitlength = 128 - } - return insertNumIntoIP(ip, num, bitlength), nil -} - -// AddressRange returns the first and last addresses in the given CIDR range. -func AddressRange(network *net.IPNet) (net.IP, net.IP) { - // the first IP is easy - firstIP := network.IP - - // the last IP is the network address OR NOT the mask address - prefixLen, bits := network.Mask.Size() - if prefixLen == bits { - // Easy! - // But make sure that our two slices are distinct, since they - // would be in all other cases.
- lastIP := make([]byte, len(firstIP)) - copy(lastIP, firstIP) - return firstIP, lastIP - } - - firstIPInt, bits := ipToInt(firstIP) - hostLen := uint(bits) - uint(prefixLen) - lastIPInt := big.NewInt(1) - lastIPInt.Lsh(lastIPInt, hostLen) - lastIPInt.Sub(lastIPInt, big.NewInt(1)) - lastIPInt.Or(lastIPInt, firstIPInt) - - return firstIP, intToIP(lastIPInt, bits) -} - -// AddressCount returns the number of distinct host addresses within the given -// CIDR range. -// -// Since the result is a uint64, this function returns meaningful information -// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. -func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies -//that none of the subnets overlap and that all subnets are in the supernet. -//It returns an error if either of those conditions is not satisfied. -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := i + 1; j < len(subnets); j++ { - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided.
If the IP space rolls over -// then the second return value is true. -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size, -// starting from the maximum IP of the offset subnet. -// If the IP exceeds the maximum IP then the second return value is true. -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one; this returns a new []byte for the IP. -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one; this returns a new []byte for the IP. -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index 861a5f6..0000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,38 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := &big.Int{} - val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding.
- for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum := big.NewInt(int64(num)) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e..0000000 --- a/vendor/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md deleted file mode 100644 index 26f42a2..0000000 --- a/vendor/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). - -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go deleted file mode 100644 index f9655a1..0000000 --- a/vendor/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,543 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. 
-type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) replaceEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - if idx < num && n.edges[idx].label == e.label { - n.edges[idx].node = e.node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -func (n *node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration. -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a new entry or update -// an existing entry. Returns whether an existing entry was updated.
-func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaustion - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } - - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.replaceEdge(edge{ - label: search[0], - node: child, - }) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add it to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } -} - -// Delete is used to delete a key, returning the previous -// value and whether it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - var parent *node - var label byte - n := t.root - search := s - for { - // Check for key exhaustion - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - parent = n - label = search[0] - n = n.getEdge(label) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should delete this node from the parent - if parent != nil && len(n.edges) == 0 { - parent.delEdge(label) - } - - // Check if we should merge this node - if n != t.root && len(n.edges) == 1 { - n.mergeChild() - } - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - - return leaf.val, true -} - -// DeletePrefix is used to delete the subtree under a prefix. -// Returns how many nodes were deleted. -// Use this to delete large subtrees efficiently. -func (t *Tree) DeletePrefix(s string) int { - return t.deletePrefix(nil, t.root, s) -} - -// deletePrefix does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix string) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s string, v interface{}) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = nil - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child :=
n.getEdge(label) - if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to look up a specific key, returning -// the value and whether it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaustion - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. -func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaustion - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - break - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaustion - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix.
-func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaustion - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 5f14d11..0000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 56fdfc2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be used to obtain more specific information about the error.
-// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. 
-// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 0202a00..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,194 +0,0 @@ -package awserr - -import "fmt" - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. 
-func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. 
-func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. -func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. 
- if src.Type().AssignableTo(dst.Type()) {
- dst.Set(src)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
deleted file mode 100644
index 59fa4a5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package awsutil
-
-import (
- "reflect"
-)
-
-// DeepEqual reports whether the two values are deeply equal, like reflect.DeepEqual.
-// In addition, this function will also dereference the input values if
-// possible, so the DeepEqual performed will not fail if one parameter is a
-// pointer and the other is not.
-//
-// DeepEqual will not perform indirection of nested values of the input parameters.
-func DeepEqual(a, b interface{}) bool {
- ra := reflect.Indirect(reflect.ValueOf(a))
- rb := reflect.Indirect(reflect.ValueOf(b))
-
- if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
- // If the elements are both nil, and of the same type they are equal.
- // If they are of different types they are not equal.
- return reflect.TypeOf(a) == reflect.TypeOf(b)
- } else if raValid != rbValid {
- // Both values must be valid to be equal.
- return false
- }
-
- return reflect.DeepEqual(ra.Interface(), rb.Interface())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
deleted file mode 100644
index 11c52c3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package awsutil
-
-import (
- "reflect"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/jmespath/go-jmespath"
-)
-
-var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
-
-// rValuesAtPath returns a slice of values found in value v. The values
-// in v are explored recursively so all nested values are collected.
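Before the path helpers, a quick sketch of DeepEqual's pointer forgiveness (the values are arbitrary):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	s := "hello"
	// One level of indirection is dereferenced before comparing.
	fmt.Println(awsutil.DeepEqual(&s, "hello")) // true
	// A valid value never equals an invalid (nil) one.
	fmt.Println(awsutil.DeepEqual(&s, nil)) // false
}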
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 710eb43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
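A runnable sketch of SetValueAtPath on a hypothetical two-level struct, showing the create-path behavior (nil intermediate pointers are allocated on the way to the leaf):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Inner and Outer are hypothetical types for illustration only.
type Inner struct{ Name *string }
type Outer struct{ Inner *Inner }

func main() {
	o := &Outer{}
	// The nil *Inner and *string are allocated before the leaf is set.
	awsutil.SetValueAtPath(o, "Inner.Name", "example")
	fmt.Println(*o.Inner.Name) // example
}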
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index b6432f1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,89 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
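What Prettify produces for a small struct, as a sketch; the Tag type is hypothetical but mirrors the pointer-field shape of SDK API types:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Tag is a hypothetical struct with pointer fields, like SDK API types.
type Tag struct {
	Key   *string
	Value *string
	TTL   *int // left nil, so Prettify omits it
}

func main() {
	k, v := "env", "prod"
	fmt.Println(awsutil.Prettify(Tag{Key: &k, Value: &v}))
	// {
	//   Key: "env",
	//   Value: "prod"
	// }
}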
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index 788fe6e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,90 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint string - SigningRegion string - SigningName string -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
- svc := &Client{
- Config: cfg,
- ClientInfo: info,
- Handlers: handlers.Copy(),
- }
-
- switch retryer, ok := cfg.Retryer.(request.Retryer); {
- case ok:
- svc.Retryer = retryer
- case cfg.Retryer != nil && cfg.Logger != nil:
- s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
- cfg.Logger.Log(s)
- fallthrough
- default:
- maxRetries := aws.IntValue(cfg.MaxRetries)
- if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
- maxRetries = 3
- }
- svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
- }
-
- svc.AddDebugHandlers()
-
- for _, option := range options {
- option(svc)
- }
-
- return svc
-}
-
-// NewRequest returns a new Request pointer for the service API
-// operation and parameters.
-func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
- return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
-}
-
-// AddDebugHandlers injects debug logging handlers into the service to log request
-// debug information.
-func (c *Client) AddDebugHandlers() {
- if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
- return
- }
-
- c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
- c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
deleted file mode 100644
index 1313478..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package client
-
-import (
- "math/rand"
- "sync"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
-//
-// type retryer struct {
-// client.DefaultRetryer
-// }
-//
-// // This implementation always has 100 max retries
-// func (d retryer) MaxRetries() int { return 100 }
-type DefaultRetryer struct {
- NumMaxRetries int
-}
-
-// MaxRetries returns the maximum number of retries the service will use to make
-// an individual API request.
-func (d DefaultRetryer) MaxRetries() int {
- return d.NumMaxRetries
-}
-
-var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
-
-// RetryRules returns the delay duration before retrying this request again
-func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
- // Set the upper limit of delay in retrying at ~five minutes
- minTime := 30
- throttle := d.shouldThrottle(r)
- if throttle {
- minTime = 500
- }
-
- retryCount := r.RetryCount
- if retryCount > 13 {
- retryCount = 13
- } else if throttle && retryCount > 8 {
- retryCount = 8
- }
-
- delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
- return time.Duration(delay) * time.Millisecond
-}
-
-// ShouldRetry returns true if the request should be retried.
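The composition pattern the DefaultRetryer doc comment describes, as a runnable sketch; the 10-retry cap is arbitrary, and note that the request.Retryer interface expects MaxRetries to return int, matching the method defined above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/client"
)

// retryer embeds client.DefaultRetryer and overrides only MaxRetries.
type retryer struct {
	client.DefaultRetryer
}

func (d retryer) MaxRetries() int { return 10 }

func main() {
	r := retryer{client.DefaultRetryer{NumMaxRetries: 3}}
	fmt.Println(r.MaxRetries()) // 10; the override shadows NumMaxRetries
}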
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - - if r.HTTPResponse.StatusCode >= 500 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true - } - return r.IsErrorThrottle() -} - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 1f39c91..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,108 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. 
- r.ResetBody()
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
----[ RESPONSE DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-func logResponse(r *request.Request) {
- lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
- r.HTTPResponse.Body = &teeReaderCloser{
- Reader: io.TeeReader(r.HTTPResponse.Body, lw),
- Source: r.HTTPResponse.Body,
- }
-
- handlerFn := func(req *request.Request) {
- body, err := httputil.DumpResponse(req.HTTPResponse, false)
- if err != nil {
- lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
- return
- }
-
- b, err := ioutil.ReadAll(lw.buf)
- if err != nil {
- lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
- return
- }
- lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
- if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
- lw.Logger.Log(string(b))
- }
- }
-
- const handlerName = "awsdk.client.LogResponse.ResponseBody"
-
- r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
- Name: handlerName, Fn: handlerFn,
- })
- r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
- Name: handlerName, Fn: handlerFn,
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
deleted file mode 100644
index 4778056..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package metadata
-
-// ClientInfo wraps immutable data from the client.Client structure.
-type ClientInfo struct {
- ServiceName string
- APIVersion string
- Endpoint string
- SigningName string
- SigningRegion string
- JSONVersion string
- TargetPrefix string
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
deleted file mode 100644
index d1f31f1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ /dev/null
@@ -1,470 +0,0 @@
-package aws
-
-import (
- "net/http"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/endpoints"
-)
-
-// UseServiceDefaultRetries instructs the config to use the service's own
-// default number of retries. This will be the default action if
-// Config.MaxRetries is also nil.
-const UseServiceDefaultRetries = -1
-
-// RequestRetryer is an alias for a type that implements the request.Retryer
-// interface.
-type RequestRetryer interface{}
-
-// A Config provides service configuration for service clients. By default,
-// all clients will use the defaults.DefaultConfig structure.
-//
-// // Create Session with MaxRetry configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(&aws.Config{
-// MaxRetries: aws.Int(3),
-// }))
-//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"),
-// })
-type Config struct {
- // Enables verbose error printing of all credential chain errors.
- // Should be used when wanting to see all errors while attempting to
- // retrieve credentials.
- CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // @note This configuration option is specific to the Amazon S3 service. 
- // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
- // Amazon S3: Virtual Hosting of Buckets
- S3ForcePathStyle *bool
-
- // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
- // header to PUT requests over 2MB of content. 100-Continue instructs the
- // HTTP client not to send the body until the service responds with a
- // `continue` status. This is useful to prevent sending the request body
- // until after the request is authenticated, and validated.
- //
- // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
- //
- // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
- // `ExpectContinueTimeout` for information on adjusting the continue wait
- // timeout. https://golang.org/pkg/net/http/#Transport
- //
- // You should use this flag to disable 100-Continue if you experience issues
- // with proxies or third party S3 compatible services.
- S3Disable100Continue *bool
-
- // Set this to `true` to enable the S3 Accelerate feature. All operations
- // compatible with S3 Accelerate will use the accelerate endpoint for
- // requests. Requests not compatible will fall back to normal S3 requests.
- //
- // The bucket must be enabled for accelerate to be used with an S3 client
- // that has accelerate enabled. If the bucket is not enabled for accelerate
- // an error will be returned. The bucket name must be DNS compatible to
- // also work with accelerate.
- S3UseAccelerate *bool
-
- // Set this to `true` to disable the EC2Metadata client from overriding the
- // default http.Client's Timeout. This is helpful if you do not want the
- // EC2Metadata client to create a new http.Client. This option is only
- // meaningful if you're not already using a custom HTTP client with the
- // SDK. Enabled by default.
- //
- // Must be set and provided to the session.NewSession() in order to disable
- // the EC2Metadata overriding the timeout for default credentials chain.
- //
- // Example:
- // sess := session.Must(session.NewSession(aws.NewConfig()
- // .WithEC2MetadataDisableTimeoutOverride(true)))
- //
- // svc := s3.New(sess)
- //
- EC2MetadataDisableTimeoutOverride *bool
-
- // Instructs the endpoint to be generated for a service client to
- // be the dual stack endpoint. The dual stack endpoint will support
- // both IPv4 and IPv6 addressing.
- //
- // Setting this for a service which does not support dual stack will fail
- // to make requests. It is not recommended to set this value on the session
- // as it will apply to all service clients created with the session, even
- // services which don't support dual stack endpoints.
- //
- // If the Endpoint config value is also provided the UseDualStack flag
- // will be ignored.
- //
- // Only supported with.
- //
- // sess := session.Must(session.NewSession())
- //
- // svc := s3.New(sess, &aws.Config{
- // UseDualStack: aws.Bool(true),
- // })
- UseDualStack *bool
-
- // SleepDelay is an override for the func the SDK will call when sleeping
- // during the lifecycle of a request. Specifically this will be used for
- // request delays. This value should only be used for testing. To adjust
- // the delay of a request see the aws/client.DefaultRetryer and
- // aws/request.Retryer.
- //
- // SleepDelay will prevent any Context from being used for canceling retry
- // delay of an API operation. It is recommended to not use SleepDelay at all
- // and specify a Retryer instead.
- SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetry configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. 
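Because every setter stores a pointer and returns the receiver, configuration chains fluently; a minimal sketch using the methods above (region and retry values are arbitrary):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(3).
		WithS3ForcePathStyle(true)

	// Pointer fields distinguish "unset" from zero values when merging.
	fmt.Println(aws.StringValue(cfg.Region), aws.IntValue(cfg.MaxRetries))
}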
-func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - 
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
- }
-}
-
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
-func (c *Config) Copy(cfgs ...*Config) *Config {
- dst := &Config{}
- dst.MergeIn(c)
-
- for _, cfg := range cfgs {
- dst.MergeIn(cfg)
- }
-
- return dst
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
deleted file mode 100644
index 79f4268..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package aws
-
-import (
- "time"
-)
-
-// Context is a copy of the Go v1.7 stdlib's context.Context interface.
-// It is represented as an SDK interface to enable you to use the "WithContext"
-// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
-//
-// See https://golang.org/pkg/context on how to use contexts.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key return the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- Value(key interface{}) interface{}
-}
-
-// BackgroundContext returns a context that will never be canceled, has no
-// values, and no deadline. This context is used by the SDK to provide
-// backwards compatibility with non-context API operations and functionality.
-//
-// Go 1.6 and before:
-// This context function is equivalent to context.Background in the Go stdlib.
-//
-// Go 1.7 and later:
-// The context returned will be the value returned by context.Background().
-//
-// See https://golang.org/pkg/context for more information on Contexts.
-func BackgroundContext() Context {
- return backgroundCtx
-}
-
-// SleepWithContext will wait for the timer duration to expire, or for the
-// context to be canceled, whichever happens first. If the context is canceled,
-// the Context's error will be returned.
-//
-// Expects Context to always return a non-nil error if the Done channel is closed.
-func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go deleted file mode 100644 index e8cf93d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.7 - -package aws - -import "time" - -// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This -// is copied to provide a 1.6 and 1.5 safe version of context that is compatible -// with Go 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go deleted file mode 100644 index 064f75c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 3b73a7d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,369 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
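A runnable sketch of the two context helpers above (the sleep duration is arbitrary):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// BackgroundContext is never canceled, so the timer always wins
	// and SleepWithContext returns nil.
	ctx := aws.BackgroundContext()
	if err := aws.SleepWithContext(ctx, 10*time.Millisecond); err != nil {
		fmt.Println("canceled:", err)
		return
	}
	fmt.Println("slept the full duration")
}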
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 495e3ef..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,242 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
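The pointer-conversion helpers pair constructors with nil-safe accessors; a sketch with arbitrary values, using only the functions defined above:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	n := aws.Int64(42)
	fmt.Println(aws.Int64Value(n))   // 42
	fmt.Println(aws.Int64Value(nil)) // 0, not a panic

	// Milliseconds since the Unix epoch, the shape CloudWatch Logs expects.
	fmt.Println(aws.TimeUnixMilli(time.Unix(1, 500e6))) // 1500
}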
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. -var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 10 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. 
- r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } -}} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b15..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. 
-var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index f298d65..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. 
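// A minimal sketch of the caching behavior described above (illustrative
// only; assumes the credentials package is imported):
//
//     creds := credentials.NewChainCredentials([]credentials.Provider{
//         &credentials.EnvProvider{},
//     })
//     v, err := creds.Get()     // first call walks the chain
//     if err == nil {
//         _ = v.AccessKeyID
//         _ = creds.IsExpired() // reflects the cached provider's state
//     }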
-func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 42416fc..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,246 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "sync" - "time" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. 
-// -// @readonly -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. -func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// A Credentials provides synchronous safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. 
All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds Value - forceRefresh bool - m sync.Mutex - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index c397495..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,178 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. 
This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
-func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index a4cec5c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,191 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. 
- ExpiryWindow time.Duration -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the -func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.HTTPRequest.Header.Set("Accept", "application/json") - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, - ) - } - - // Response body format is not consistent between metadata endpoints. 
- // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index c14231a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,78 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - // - // @readonly - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - // - // @readonly - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. 
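// A minimal usage sketch for the environment provider (illustrative only;
// assumes AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are set):
//
//     creds := credentials.NewEnvCredentials()
//     v, err := creds.Get()
//     if err != nil {
//         // err is ErrAccessKeyIDNotFound or ErrSecretAccessKeyNotFound
//     }
//     _ = v.SessionToken // populated from AWS_SESSION_TOKEN, may be empty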
-func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 51e21e0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,150 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/go-ini/ini" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
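// A minimal sketch of reading the "default" profile (illustrative only;
// assumes a shared credentials file in the ini format shown in example.ini):
//
//     creds := credentials.NewSharedCredentials("", "default")
//     v, err := creds.Get()
//     if err == nil {
//         _ = v.AccessKeyID
//     }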
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) - } - - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) - } - - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.Key("aws_session_token") - - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index 4f5dab3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. 
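// A minimal sketch of wiring static credentials into a client config
// (illustrative only; the key values are placeholders and the aws
// package import is assumed):
//
//     creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
//     cfg := aws.NewConfig().WithCredentials(creds)
//     _ = cfg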
-func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 4108e43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,298 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials, Sessions or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - creds := stscreds.NewCredentials(sess, "myRoleArn") - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" -) - -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Printf("Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. -// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. 
-// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - credentials.Expiry - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes a Config provider to create the STS client. The ConfigProvider is -// satisfied by the session.Session type. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. 
All access to the credentials and refreshing them -// will be synchronized. -func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: sts.New(c), - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. - p.Duration = DefaultDuration - } - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - roleOutput, err := p.Client.AssumeRole(input) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. - p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyId, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - ProviderName: ProviderName, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 07afe3b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,163 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. 
This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Defaults provides a collection of default values for SDK clients. -type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. 
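// A minimal sketch of resetting a config's credentials to the default
// chain (illustrative only; assumes the defaults package is imported):
//
//     def := defaults.Get()
//     def.Config.Credentials = defaults.CredChain(def.Config, def.Handlers)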
-func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - }, - }) -} - -const ( - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. -func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("http://169.254.170.2%s", uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { - errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. 
-// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb616..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 984407a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,162 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or -// error if the request failed. 
-func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - - return output.Content, req.Send() -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New("SerializationError", - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") - if err != nil { - return "", err - } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
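Taken together, these accessors make instance introspection a few calls. A sketch, guarded by Available since the calls only succeed where a metadata endpoint is reachable:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        svc := ec2metadata.New(session.Must(session.NewSession()))
        if !svc.Available() {
            fmt.Println("metadata service unreachable (not on EC2?)")
            return
        }

        region, _ := svc.Region() // the availability zone minus its trailing letter
        doc, _ := svc.GetInstanceIdentityDocument()
        fmt.Println("region:", region, "instance:", doc.InstanceID)
    }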
-func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index 5b4379d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. 
- Timeout: 5 * time.Second, - } - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index 74f72de..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,133 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custRmIotDataService(p) - } - - return ps, nil -} - -func custAddS3DualStack(p *partition) { - if p.ID != "aws" { - return - } - - s, ok := p.Services["s3"] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services["s3"] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index ba0c07b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,2331 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. -) - -// AWS Standard partition's regions. -const ( - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. 
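DecodeModel's doc comment sketches the enumeration pattern; filled out below, assuming a version-3 endpoints document on disk (the filename here is hypothetical):

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        f, err := os.Open("endpoints.json") // hypothetical model file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        resolver, err := endpoints.DecodeModel(f)
        if err != nil {
            log.Fatal(err)
        }

        // EnumPartitions exposes the partitions in resolution order.
        for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
            fmt.Println("partition:", p.ID())
        }
    }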
-const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). -) - -// Service identifiers -const ( - AcmServiceID = "acm" // Acm. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GreengrassServiceID = "greengrass" // Greengrass. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. 
- ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - S3ServiceID = "s3" // S3. - SdbServiceID = "sdb" // Sdb. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. 
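The generated identifiers pair with the resolver like so; a sketch that resolves one endpoint and walks the bundled partitions:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        e, err := endpoints.DefaultResolver().EndpointFor(
            endpoints.S3ServiceID, endpoints.UsWest2RegionID)
        if err != nil {
            log.Fatal(err)
        }
        // Per the aws partition model below: https://s3-us-west-2.amazonaws.com
        fmt.Println("s3 / us-west-2:", e.URL)

        for _, p := range endpoints.DefaultPartitions() {
            fmt.Println("partition:", p.ID())
        }
    }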
-func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "EU (Frankfurt)", - }, - "eu-west-1": region{ - Description: "EU (Ireland)", - }, - "eu-west-2": region{ - Description: "EU (London)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": 
endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": 
endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": 
service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - 
"elasticache": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": 
endpoint{}, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", - 
SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - 
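The s3 entry above carries HasDualStack and a DualStackHostname template; asking for it at resolve time is an option flag. A sketch, hedged in that the Options field name is taken from the aws-sdk-go v1 endpoints package:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        e, err := endpoints.DefaultResolver().EndpointFor(
            endpoints.S3ServiceID, endpoints.UsWest2RegionID,
            func(o *endpoints.Options) { o.UseDualStack = true },
        )
        if err != nil {
            log.Fatal(err)
        }
        // "{service}.dualstack.{region}.{dnsSuffix}" expands to
        // https://s3.dualstack.us-west-2.amazonaws.com
        fmt.Println(e.URL)
    }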
"sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - 
}, - }, - "swf": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - }, - Services: services{ - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - 
"sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). -func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index a0e9bc4..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns an endpoint Resolver that will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g. AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition.
-// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.Name) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.Name) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic for how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index 9c3eedb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,439 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Options provides the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to combine - // the service endpoint ID with.
If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving an endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL option. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The built-in Partition and DefaultResolver return values satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be used instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition.
-// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !ok { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id string - p *partition -} - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless of whether -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve an endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and service expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} - for id := range p.p.Regions { - rs[id] = Region{ - id: id, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := map[string]Service{} - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and the ability to resolve an -// endpoint from the context of a region, given a service.
-type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and the ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas an Endpoint is -// a URL that can be resolved to an instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas an Endpoint is -// a URL that can be resolved to an instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// An Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition, -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // The signing method that should be used for signing requests.
- SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index 13d968a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,303 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) 
- } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) - - s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for 
generic endpoint creation. - return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - hostname := e.Hostname - - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - if len(signingName) == 0 { - signingName = service - } - - return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 05e92df..0000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// CodeGenOptions are the options for code generating the endpoints model -// definition into Go code. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions -} - -// Set combines all of the option functions together. -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel, given an endpoints model file, will decode it and attempt to -// generate Go code from the model definition. An error will be returned if -// the code cannot be generated or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet":
endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" . }} - - {{ range $_, $partition := . }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ template "service consts" . }} - - {{ template "endpoint resolvers" . }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . -}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . 
-}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index 5766361..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - // - // @readonly - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 3babb5a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. 
-func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfy v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be used to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug states that debug output should be logged by the SDK. This should - // be used to inspect requests made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodies in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK. - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used when you want to log that service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use the same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...)
-} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index 271da43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !appengine,!plan9 - -package request - -import ( - "net" - "os" - "syscall" -) - -func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index 802ac88..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,256 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - Complete HandlerList -} - -// Copy returns a copy of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.Complete.Clear() -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic is to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition, such as an error, like HandlerListStopOnError, - // or for logging, like HandlerListLogItem.
- AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. 
Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f7960..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 02f07f4..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,58 +0,0 @@ -package request - -import ( - "io" - "sync" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, 0) - - reader.buf = buf - return reader -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. 
-func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index 299dc37..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,575 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given an aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is a pointer value to an object which the request's response -// payload will be deserialized to.
-func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// An Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// they were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always return a non-nil context. If the Request does not have a -// context, aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// an in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use a single Request value for multiple -// requests. A new Request should be created for each API operation request.
-// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - r.ResetBody() -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.NotHoist = false - - if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err - } - } - - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// PresignRequest behaves just like Presign, but hoists all headers and signs them. -// Also returns the signed headers back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to Build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned.
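Presign is typically reached through a generated request method rather than by constructing a Request by hand. A hedged sketch using the vendored S3 client, assuming svc is an existing *s3.S3 and the bucket and key are placeholders:

    req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("my-bucket"), // placeholder bucket
        Key:    aws.String("my-key"),    // placeholder key
    })
    url, err := req.Presign(15 * time.Minute) // URL stays valid for 15 minutes
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("presigned URL:", url)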
-func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody = newOffsetReader(r.Body, r.BodyStart) - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := computeBodyLength(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) - } - - var body io.ReadCloser - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if an aws.ReaderSeekerCloser was used with - // an io.Reader that was not also an io.Seeker. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that an io.Reader might not actually be seekable. - switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper.
-func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. - r.Handlers.Complete.Run(r) - }() - - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. 
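MakeAddToUserAgentHandler, defined earlier in this file, pairs naturally with the Build handler list. A small sketch, assuming sess is an existing *session.Session and the handler name and version strings are placeholders:

    sess.Handlers.Build.PushBackNamed(request.NamedHandler{
        Name: "myapp.user-agent", // hypothetical handler name
        Fn:   request.MakeAddToUserAgentHandler("myapp", "1.2.3", "extra-info"),
    })
    // every client created from sess now appends "myapp/1.2.3 (extra-info)"
    // to its User-Agent header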
-func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == CanceledErrorCode { - return false - } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() - } - } - - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index 869b97a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// a body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index c32fc69..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.8 - -package request - -import ( - "net/http" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// a body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent.
SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index a7365cd..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 307fa07..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 59de673..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,236 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// cont := true -// for p.Next() && cont { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// } -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. - // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. 
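Concretely, a Pagination value only needs NewRequest to be set. A sketch paginating S3 ListObjects by hand, assuming svc is an existing *s3.S3 and the bucket is a placeholder (normally the generated ListObjectsPages method would do this for you):

    p := request.Pagination{
        NewRequest: func() (*request.Request, error) {
            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                Bucket: aws.String("my-bucket"), // placeholder bucket
            })
            return req, nil
        },
    }
    for p.Next() {
        page := p.Page().(*s3.ListObjectsOutput)
        fmt.Println("objects in page:", len(page.Contents))
    }
    if err := p.Err(); err != nil {
        log.Fatal(err)
    }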
- NewRequest func() (*Request, error) - - started bool - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { - tokens = append(tokens, nil) - } - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. 
-// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 8d369c1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,161 +0,0 @@ -package request - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
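Since Retryer is a plain interface, a custom implementation can embed the SDK's client.DefaultRetryer and override a single method. A hedged sketch (fixedRetryer is an illustrative name, not part of the SDK):

    type fixedRetryer struct {
        client.DefaultRetryer // from aws/client; supplies ShouldRetry and MaxRetries
    }

    // RetryRules replaces the default exponential backoff with a constant delay.
    func (d fixedRetryer) RetryRules(r *request.Request) time.Duration {
        return 200 * time.Millisecond
    }

    // usage: cfg := request.WithRetryer(aws.NewConfig(),
    //     fixedRetryer{client.DefaultRetryer{NumMaxRetries: 3}})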
-var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: struct{}{}, - ErrCodeRead: struct{}{}, -} - -type temporaryError interface { - Temporary() bool -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporaryError); ok { - return t.Temporary() - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. -func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - return IsErrorThrottle(r.Error) -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. 
-// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return an ErrCodeResponseTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// with an ErrCodeResponseTimeout error, if its child is one. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurs, we will return an -// ErrCodeResponseTimeout error. -// -// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second)) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations.
- r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 4012462..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,234 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. 
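These types are what the SDK's generated Validate methods are built from. A sketch of hand-rolling one for a hypothetical input struct (PutThingInput and its Name field are illustrative, not part of the SDK):

    // PutThingInput is an illustrative input type, not part of the SDK.
    func (p *PutThingInput) Validate() error {
        invalidParams := request.ErrInvalidParams{Context: "PutThingInput"}
        if p.Name == nil {
            invalidParams.Add(request.NewErrParamRequired("Name"))
        }
        if p.Name != nil && len(*p.Name) < 1 {
            invalidParams.Add(request.NewErrParamMinLen("Name", 1))
        }
        if invalidParams.Len() > 0 {
            return invalidParams // satisfies error via ErrInvalidParams.Error()
        }
        return nil
    }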
-func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as an awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil. Implemented for the awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field returns the field and the context where the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents a required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: "missing required field", - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's required minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error.
-type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 22d2f80..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,287 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) -} - -// ApplyOptions updates the waiter with the list of waiter options provided. 
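In practice these waiter options are passed to the generated WaitUntil*WithContext methods rather than to a Waiter built by hand. A sketch with the vendored S3 client, assuming svc is an existing *s3.S3 and the bucket is a placeholder:

    ctx := aws.BackgroundContext()
    err := svc.WaitUntilBucketExistsWithContext(ctx,
        &s3.HeadBucketInput{Bucket: aws.String("my-bucket")},
        request.WithWaiterMaxAttempts(10), // give up after 10 checks
        request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
    )
    if err != nil {
        log.Fatal(err)
    }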
-func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. -func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors is reached, -// or the max attempts expire. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts are exhausted. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times. - // This is here instead of in the for loop above to prevent delaying - // unnecessarily when the waiter will not retry.
- if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else if err := aws.SleepWithContext(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index ea7b886..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. 
Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency - -Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. - -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. - -Create a Session with the default config and request handlers. With credentials -region, and profile loaded from the environment and shared config automatically. -Requires the AWS_PROFILE to be set, or "default" is used. - - // Create Session - sess := session.Must(session.NewSession()) - - // Create a Session with a custom region - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - })) - - // Create a S3 client instance from a session - sess := session.Must(session.NewSession()) - - svc := s3.New(sess) - -Create Session With Option Overrides - -In addition to NewSession, Sessions can be created using NewSessionWithOptions. -This func allows you to control and override how the Session will be created -through code instead of being driven by environment variables only. - -Use NewSessionWithOptions when you want to provide the config profile, or -override the shared config state (AWS_SDK_LOAD_CONFIG). - - // Equivalent to session.NewSession() - sess := session.Must(session.NewSessionWithOptions(session.Options{ - // Options - })) - - // Specify profile to load for the session's config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Profile: "profile_name", - })) - - // Specify profile for config and region for requests - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Region: aws.String("us-east-1")}, - Profile: "profile_name", - })) - - // Force enable Shared Config support - sess := session.Must(session.NewSessionWithOptions(session.Options{ - SharedConfigState: session.SharedConfigEnable, - })) - -Adding Handlers - -You can add handlers to a session for processing HTTP requests. All service -clients that use the session inherit the handlers. 
For example, the following
-handler logs every request and its payload made by a service client:
-
- // Create a session, and add additional handlers for all service
- // clients created with the Session to inherit. Adds logging handler.
- sess := session.Must(session.NewSession())
-
- sess.Handlers.Send.PushFront(func(r *request.Request) {
- // Log every request made and its payload
- logger.Printf("Request: %s/%s, Payload: %s",
- r.ClientInfo.ServiceName, r.Operation, r.Params)
- })
-
-Deprecated "New" function
-
-The New session function has been deprecated because it does not provide a good
-way to return errors that occur when loading the configuration files and values.
-Because of this, NewSession was created so errors can be retrieved when
-creating a session fails.
-
-Shared Config Fields
-
-By default the SDK will only load the shared credentials file's (~/.aws/credentials)
-credentials values, and all other config is provided by the environment variables,
-SDK defaults, and user provided aws.Config values.
-
-If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
-option is used to create the Session, the full shared config values will be
-loaded. This includes credentials, region, and support for assume role. In
-addition the Session will load its configuration from both the shared config
-file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
-files have the same format.
-
-If both config files are present the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
-
-Credentials are the values the SDK should use for authenticating requests with
-AWS Services. Credentials from a configuration file must include both
-aws_access_key_id and aws_secret_access_key, provided together in the
-same file, to be considered valid. The values will be ignored if not a complete
-group. aws_session_token is an optional field that can be provided if both of
-the other two fields are also provided.
-
- aws_access_key_id = AKID
- aws_secret_access_key = SECRET
- aws_session_token = TOKEN
-
-Assume Role values allow you to configure the SDK to assume an IAM role using
-a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK supports assuming
-a role with an MFA token if the session option AssumeRoleTokenProvider
-is set.
-
- role_arn = arn:aws:iam::<account_number>:role/<role_name>
- source_profile = profile_with_creds
- external_id = 1234
- mfa_serial = <serial or mfa arn>
- role_session_name = session_name
-
-Region is the region the SDK should use for looking up AWS service endpoints
-and signing requests.
-
- region = us-east-1
-
-Assume Role with MFA token
-
-To create a session with support for assuming an IAM role with MFA set the
-session option AssumeRoleTokenProvider to a function that will prompt for the
-MFA token code when the SDK assumes the role and refreshes the role's credentials.
-This allows you to configure the SDK via the shared config to assume a role
-with MFA tokens.
-
-In order for the SDK to assume a role with MFA the SharedConfigState
-session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
-environment variable must be set.
-
-The shared configuration instructs the SDK to assume an IAM role with MFA
-when the mfa_serial configuration field is set in the shared config
-(~/.aws/config) or shared credentials (~/.aws/credentials) file.
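Since New cannot surface load errors, a minimal sketch of the NewSession pattern described above (the region and the fatal error handling are illustrative choices, not requirements):

    sess, err := session.NewSession(&aws.Config{
        Region: aws.String("us-east-1"),
    })
    if err != nil {
        log.Fatalf("failed to create session: %v", err)
    }
    svc := s3.New(sess)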
-
-If mfa_serial is set in the configuration and the AssumeRoleTokenProvider
-session option is not set, an error will
-be returned when creating the session.
-
- sess := session.Must(session.NewSessionWithOptions(session.Options{
- AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
- }))
-
- // Create service client value configured for credentials
- // from assumed role.
- svc := s3.New(sess)
-
-To set up assume role outside of a session see the stscreds.AssumeRoleProvider
-documentation.
-
-Environment Variables
-
-When a Session is created several environment variables can be set to adjust
-how the SDK functions, and what configuration data it loads when creating
-Sessions. All environment values are optional, but some values like credentials
-require multiple of the values to be set or the partial values will be ignored.
-All environment variable values are strings unless otherwise noted.
-
-Environment configuration values. If set, both Access Key ID and Secret Access
-Key must be provided. A Session Token may optionally also be provided, but is
-not required.
-
- # Access Key ID
- AWS_ACCESS_KEY_ID=AKID
- AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
-
- # Secret Access Key
- AWS_SECRET_ACCESS_KEY=SECRET
- AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
-
- # Session Token
- AWS_SESSION_TOKEN=TOKEN
-
-Region value will instruct the SDK where to make service API requests to. If it is
-not provided in the environment the region must be provided before a service
-client request is made.
-
- AWS_REGION=us-east-1
-
- # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_REGION is not also set.
- AWS_DEFAULT_REGION=us-east-1
-
-Profile name the SDK should use when loading shared config from the
-configuration files. If not provided "default" will be used as the profile name.
-
- AWS_PROFILE=my_profile
-
- # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_PROFILE is not also set.
- AWS_DEFAULT_PROFILE=my_profile
-
-SDK load config instructs the SDK to load the shared config in addition to
-shared credentials. This also expands the configuration loaded so the shared
-credentials will have parity with the shared config file. This also enables
-Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
-env values as well.
-
- AWS_SDK_LOAD_CONFIG=1
-
-Shared credentials file path can be set to instruct the SDK to use an alternative
-file for the shared credentials. If not set the file will be loaded from
-$HOME/.aws/credentials on Linux/Unix based systems, and
-%USERPROFILE%\.aws\credentials on Windows.
-
- AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
-
-Shared config file path can be set to instruct the SDK to use an alternative
-file for the shared config. If not set the file will be loaded from
-$HOME/.aws/config on Linux/Unix based systems, and
-%USERPROFILE%\.aws\config on Windows.
-
- AWS_CONFIG_FILE=$HOME/my_shared_config
-
-Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
-will use instead of the default system's root CA bundle. Use this only
-if you want to replace the CA bundle the SDK uses for TLS requests.
-
- AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
-
-Enabling this option will attempt to merge the Transport into the SDK's HTTP
-client. If the client's Transport is not an http.Transport an error will be
-returned.
If the Transport's TLS config is set this option will cause the SDK
-to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
-contains multiple certificates all of them will be loaded.
-
-The Session option CustomCABundle is also available when creating sessions
-to also enable this feature. The CustomCABundle session option field has priority
-over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
-
-Setting a custom HTTPClient in the aws.Config options will override this setting.
-To use this option with a custom HTTP client, the HTTP client needs to be provided
-when creating the session, not the service client.
-*/
-package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
deleted file mode 100644
index 7357e54..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package session
-
-import (
- "os"
- "strconv"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-// envConfig is a collection of environment values the SDK will read its
-// setup config from. All environment values are optional. But some values
-// such as credentials require multiple values to be complete or the values
-// will be ignored.
-type envConfig struct {
- // Environment configuration values. If set, both Access Key ID and Secret Access
- // Key must be provided. A Session Token may optionally also be provided, but is
- // not required.
- //
- // # Access Key ID
- // AWS_ACCESS_KEY_ID=AKID
- // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
- //
- // # Secret Access Key
- // AWS_SECRET_ACCESS_KEY=SECRET
- // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
- //
- // # Session Token
- // AWS_SESSION_TOKEN=TOKEN
- Creds credentials.Value
-
- // Region value will instruct the SDK where to make service API requests to. If it is
- // not provided in the environment the region must be provided before a service
- // client request is made.
- //
- // AWS_REGION=us-east-1
- //
- // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_REGION is not also set.
- // AWS_DEFAULT_REGION=us-east-1
- Region string
-
- // Profile name the SDK should use when loading shared configuration from the
- // shared configuration files. If not provided "default" will be used as the
- // profile name.
- //
- // AWS_PROFILE=my_profile
- //
- // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_PROFILE is not also set.
- // AWS_DEFAULT_PROFILE=my_profile
- Profile string
-
- // SDK load config instructs the SDK to load the shared config in addition to
- // shared credentials. This also expands the configuration loaded from the shared
- // credentials to have parity with the shared config file. This also enables
- // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
- // env values as well.
- //
- // AWS_SDK_LOAD_CONFIG=1
- EnableSharedConfig bool
-
- // Shared credentials file path can be set to instruct the SDK to use an alternate
- // file for the shared credentials. If not set the file will be loaded from
- // $HOME/.aws/credentials on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\credentials on Windows.
- //
- // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
- SharedCredentialsFile string
-
- // Shared config file path can be set to instruct the SDK to use an alternate
- // file for the shared config.
If not set the file will be loaded from
- // $HOME/.aws/config on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\config on Windows.
- //
- // AWS_CONFIG_FILE=$HOME/my_shared_config
- SharedConfigFile string
-
- // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
- // that the SDK will use instead of the system's root CA bundle.
- // Only use this if you want to configure the SDK to use a custom set
- // of CAs.
- //
- // Enabling this option will attempt to merge the Transport
- // into the SDK's HTTP client. If the client's Transport is
- // not an http.Transport an error will be returned. If the
- // Transport's TLS config is set this option will cause the
- // SDK to overwrite the Transport's TLS config's RootCAs value.
- //
- // Setting a custom HTTPClient in the aws.Config options will override this setting.
- // To use this option with a custom HTTP client, the HTTP client needs to be provided
- // when creating the session, not the service client.
- //
- // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
- CustomCABundle string
-}
-
-var (
- credAccessEnvKey = []string{
- "AWS_ACCESS_KEY_ID",
- "AWS_ACCESS_KEY",
- }
- credSecretEnvKey = []string{
- "AWS_SECRET_ACCESS_KEY",
- "AWS_SECRET_KEY",
- }
- credSessionEnvKey = []string{
- "AWS_SESSION_TOKEN",
- }
-
- regionEnvKeys = []string{
- "AWS_REGION",
- "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
- profileEnvKeys = []string{
- "AWS_PROFILE",
- "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
- sharedCredsFileEnvKey = []string{
- "AWS_SHARED_CREDENTIALS_FILE",
- }
- sharedConfigFileEnvKey = []string{
- "AWS_CONFIG_FILE",
- }
-)
-
-// loadEnvConfig retrieves the SDK's environment configuration.
-// See `envConfig` for the values that will be retrieved.
-//
-// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
-// the shared SDK config will be loaded in addition to the SDK's specific
-// configuration values.
-func loadEnvConfig() envConfig {
- enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
- return envConfigLoad(enableSharedConfig)
-}
-
-// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
-// SDK shared config. See `envConfig` for the values that will be retrieved.
-//
-// Loads the shared configuration in addition to the SDK's specific configuration.
-// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
-// environment variable is set.
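The "truthy value" wording above is exactly what strconv.ParseBool accepts, as loadEnvConfig above shows; a standalone sketch of the check:

    enable, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
    // ParseBool accepts 1, t, T, TRUE, true, True and their false
    // counterparts; any other value, including an unset variable,
    // leaves enable false and the shared config disabled.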
-func loadSharedEnvConfig() envConfig { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) envConfig { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) - - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - - return cfg -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - *dst = v - break - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 9f75d5a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,606 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ClientConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. 
NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg := loadEnvConfig() - - if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) - } - return s - } - - return deprecatedNewSession(cfgs...) -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) - - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in with this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. - // - // If not set, configuration values from from SDK defaults, environment, - // config will be used. 
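A short sketch of overriding the environment via the SharedConfigState constants defined above (the fatal error handling is illustrative):

    // Force-disable shared config even if AWS_SDK_LOAD_CONFIG is set.
    sess, err := session.NewSessionWithOptions(session.Options{
        SharedConfigState: session.SharedConfigDisable,
    })
    if err != nil {
        log.Fatal(err)
    }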
- Config aws.Config
-
- // Overrides the config profile the Session should be created from. If not
- // set the value of the environment variable will be loaded (AWS_PROFILE,
- // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
- //
- // If not set and environment variables are not set the "default"
- // (DefaultSharedConfigProfile) will be used as the profile to load the
- // session config from.
- Profile string
-
- // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
- // environment variable. By default a Session will be created using the
- // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
- //
- // Setting this value to SharedConfigEnable or SharedConfigDisable
- // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
- // and enable or disable the shared config functionality.
- SharedConfigState SharedConfigState
-
- // Ordered list of files the session will load configuration from.
- // It will override the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE
- // environment variables.
- SharedConfigFiles []string
-
- // When the SDK's shared config is configured to assume a role with MFA
- // this option is required in order to provide the mechanism that will
- // retrieve the MFA token. There is no default value for this field. If
- // it is not set an error will be returned when creating the session.
- //
- // This token provider will be called whenever the assumed role's
- // credentials need to be refreshed. Within the context of service clients
- // all sharing the same session the SDK will ensure calls to the token
- // provider are atomic. When sharing a token provider across multiple
- // sessions additional synchronization logic is needed to ensure the
- // token providers do not introduce race conditions. It is recommended to
- // share the session where possible.
- //
- // stscreds.StdinTokenProvider is a basic implementation that will prompt
- // from stdin for the MFA token code.
- //
- // This field is only used if the shared configuration is enabled, and
- // the config enables assume role with MFA via the mfa_serial field.
- AssumeRoleTokenProvider func() (string, error)
-
- // Reader for a custom Certificate Authority (CA) bundle in PEM format that
- // the SDK will use instead of the default system's root CA bundle. Use this
- // only if you want to replace the CA bundle the SDK uses for TLS requests.
- //
- // Enabling this option will attempt to merge the Transport into the SDK's HTTP
- // client. If the client's Transport is not an http.Transport an error will be
- // returned. If the Transport's TLS config is set this option will cause the SDK
- // to overwrite the Transport's TLS config's RootCAs value. If the CA
- // bundle reader contains multiple certificates all of them will be loaded.
- //
- // The Session option CustomCABundle is also available when creating sessions
- // to also enable this feature. The CustomCABundle session option field has priority
- // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
- CustomCABundle io.Reader
-}
-
-// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
-// environment, and user provided config files. This func uses the Options
-// values to configure how the Session is created.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded in addition to
-// the shared credentials file (~/.aws/credentials).
Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() - } else { - envCfg = loadEnvConfig() - } - - if len(opts.Profile) > 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - if len(envCfg.SharedCredentialsFile) == 0 { - envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(envCfg.SharedConfigFile) == 0 { - envCfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - // Only use AWS_CA_BUNDLE if session option is not provided. - if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { - f, err := os.Open(envCfg.CustomCABundle) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to open custom CA bundle PEM file", err) - } - defer f.Close() - opts.CustomCABundle = f - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -func deprecatedNewSession(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) - if cfg.EndpointResolver == nil { - // An endpoint resolver is required for a session to be able to provide - // endpoints for service client configurations. - cfg.EndpointResolver = endpoints.DefaultResolver() - } - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - return s -} - -func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Get a merged version of the user provided config to determine if - // credentials were. - userCfg := &aws.Config{} - userCfg.MergeIn(cfgs...) - - // Ordered config files will be loaded in with later files overwriting - // previous config file values. 
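Tying the ordering comment above to the Options.SharedConfigFiles field, a sketch (the file paths are hypothetical):

    // override.ini is listed last, so its values win over base.ini.
    sess, err := session.NewSessionWithOptions(session.Options{
        SharedConfigState: session.SharedConfigEnable,
        SharedConfigFiles: []string{"base.ini", "override.ini"},
    })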
- var cfgFiles []string - if opts.SharedConfigFiles != nil { - cfgFiles = opts.SharedConfigFiles - } else { - cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] - } - } - - // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) - if err != nil { - return nil, err - } - - if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { - return nil, err - } - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - // Setup HTTP client with custom cert bundle if enabled - if opts.CustomCABundle != nil { - if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { - return nil, err - } - } - - return s, nil -} - -func loadCustomCABundle(s *Session, bundle io.Reader) error { - var t *http.Transport - switch v := s.Config.HTTPClient.Transport.(type) { - case *http.Transport: - t = v - default: - if s.Config.HTTPClient.Transport != nil { - return awserr.New("LoadCustomCABundleError", - "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) - } - } - if t == nil { - t = &http.Transport{} - } - - p, err := loadCertPool(bundle) - if err != nil { - return err - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - t.TLSClientConfig.RootCAs = p - - s.Config.HTTPClient.Transport = t - - return nil -} - -func loadCertPool(r io.Reader) (*x509.CertPool, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to read custom CA bundle PEM file", err) - } - - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(b) { - return nil, awserr.New("LoadCustomCABundleError", - "failed to load custom CA bundle PEM file", err) - } - - return p, nil -} - -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) - - // Region if not already set by user - if len(aws.StringValue(cfg.Region)) == 0 { - if len(envCfg.Region) > 0 { - cfg.WithRegion(envCfg.Region) - } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { - cfg.WithRegion(sharedCfg.Region) - } - } - - // Configure credentials if not already set - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. 
- return AssumeRoleTokenProviderNotSetError{} - } - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. - cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - } - - return nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, coping the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { - s = s.Copy(cfgs...) - - var resolved endpoints.ResolvedEndpoint - var err error - - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, - }, err -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- - var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { - resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go deleted file mode 100644 index 09c8e5b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ /dev/null @@ -1,295 +0,0 @@ -package session - -import ( - "fmt" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-ini/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - - // Additional Config fields - regionKey = `region` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` -) - -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string -} - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig - - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. - // - // region - Region string -} - -type sharedConfigFile struct { - Filename string - IniData *ini.File -} - -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. 
-func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
- if len(profile) == 0 {
- profile = DefaultSharedConfigProfile
- }
-
- files, err := loadSharedConfigIniFiles(filenames)
- if err != nil {
- return sharedConfig{}, err
- }
-
- cfg := sharedConfig{}
- if err = cfg.setFromIniFiles(profile, files); err != nil {
- return sharedConfig{}, err
- }
-
- if len(cfg.AssumeRole.SourceProfile) > 0 {
- if err := cfg.setAssumeRoleSource(profile, files); err != nil {
- return sharedConfig{}, err
- }
- }
-
- return cfg, nil
-}
-
-func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
- files := make([]sharedConfigFile, 0, len(filenames))
-
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- // Skip files which can't be opened and read for whatever reason
- continue
- }
-
- f, err := ini.Load(b)
- if err != nil {
- return nil, SharedConfigLoadError{Filename: filename, Err: err}
- }
-
- files = append(files, sharedConfigFile{
- Filename: filename, IniData: f,
- })
- }
-
- return files, nil
-}
-
-func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
- var assumeRoleSrc sharedConfig
-
- // Multiple-level assume role chains are not supported
- if cfg.AssumeRole.SourceProfile == origProfile {
- assumeRoleSrc = *cfg
- assumeRoleSrc.AssumeRole = assumeRoleConfig{}
- } else {
- err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
- if err != nil {
- return err
- }
- }
-
- if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
- return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
- }
-
- cfg.AssumeRoleSource = &assumeRoleSrc
-
- return nil
-}
-
-func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
- // Apply each file in order, skipping files where the profile does not exist.
- for _, f := range files {
- if err := cfg.setFromIniFile(profile, f); err != nil {
- if _, ok := err.(SharedConfigProfileNotExistsError); ok {
- // Ignore profiles missing from a file
- continue
- }
- return err
- }
- }
-
- return nil
-}
-
-// setFromIniFile loads the configuration from the file using
-// the profile provided. A sharedConfig pointer type value is used so that
-// multiple config file loadings can be chained.
-//
-// Only loads complete logically grouped values, and will not set fields in cfg
-// for incomplete grouped values in the config, such as credentials. For example
-// if a config file only includes aws_access_key_id but no aws_secret_access_key
-// the aws_access_key_id will be ignored.
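To make the grouping rules above concrete, a hypothetical shared config file: the "assume" profile chains to "base" via source_profile, while the "broken" profile's access key is ignored because no secret key accompanies it:

    [profile base]
    aws_access_key_id = AKID
    aws_secret_access_key = SECRET

    [profile assume]
    role_arn = arn:aws:iam::123456789012:role/demo
    source_profile = base

    [profile broken]
    # no aws_secret_access_key, so this credentials group is skipped
    aws_access_key_id = AKID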
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
- section, err := file.IniData.GetSection(profile)
- if err != nil {
- // Fall back to the alternate profile name: "profile <name>"
- section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
- if err != nil {
- return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
- }
- }
-
- // Shared Credentials
- akid := section.Key(accessKeyIDKey).String()
- secret := section.Key(secretAccessKey).String()
- if len(akid) > 0 && len(secret) > 0 {
- cfg.Creds = credentials.Value{
- AccessKeyID: akid,
- SecretAccessKey: secret,
- SessionToken: section.Key(sessionTokenKey).String(),
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
- }
- }
-
- // Assume Role
- roleArn := section.Key(roleArnKey).String()
- srcProfile := section.Key(sourceProfileKey).String()
- if len(roleArn) > 0 && len(srcProfile) > 0 {
- cfg.AssumeRole = assumeRoleConfig{
- RoleARN: roleArn,
- SourceProfile: srcProfile,
- ExternalID: section.Key(externalIDKey).String(),
- MFASerial: section.Key(mfaSerialKey).String(),
- RoleSessionName: section.Key(roleSessionNameKey).String(),
- }
- }
-
- // Region
- if v := section.Key(regionKey).String(); len(v) > 0 {
- cfg.Region = v
- }
-
- return nil
-}
-
-// SharedConfigLoadError is an error for when the shared config file fails to load.
-type SharedConfigLoadError struct {
- Filename string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigLoadError) Code() string {
- return "SharedConfigLoadError"
-}
-
-// Message is the description of the error
-func (e SharedConfigLoadError) Message() string {
- return fmt.Sprintf("failed to load config file, %s", e.Filename)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigLoadError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigLoadError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigProfileNotExistsError is an error for the shared config when
-// the profile was not found in the config file.
-type SharedConfigProfileNotExistsError struct {
- Profile string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigProfileNotExistsError) Code() string {
- return "SharedConfigProfileNotExistsError"
-}
-
-// Message is the description of the error
-func (e SharedConfigProfileNotExistsError) Message() string {
- return fmt.Sprintf("failed to get profile, %s", e.Profile)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigProfileNotExistsError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigProfileNotExistsError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigAssumeRoleError is an error for the shared config when the
-// profile contains assume role information, but that information is invalid
-// or not complete.
-type SharedConfigAssumeRoleError struct {
- RoleARN string
-}
-
-// Code is the short id of the error.
-func (e SharedConfigAssumeRoleError) Code() string {
- return "SharedConfigAssumeRoleError"
-}
-
-// Message is the description of the error
-func (e SharedConfigAssumeRoleError) Message() string {
- return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
- e.RoleARN)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigAssumeRoleError) OrigErr() error {
- return nil
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigAssumeRoleError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
deleted file mode 100644
index 244c86d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package v4
-
-import (
- "net/http"
- "strings"
-)
-
-// rules houses a set of rules needed for validation of a
-// string value
-type rules []rule
-
-// rule interface allows for more flexible rules and just simply
-// checks whether or not a value adheres to that rule
-type rule interface {
- IsValid(value string) bool
-}
-
-// IsValid will iterate through all rules and see if any rules
-// apply to the value, and supports nested rules
-func (r rules) IsValid(value string) bool {
- for _, rule := range r {
- if rule.IsValid(value) {
- return true
- }
- }
- return false
-}
-
-// mapRule is a generic rule for maps
-type mapRule map[string]struct{}
-
-// IsValid for the map rule checks whether the value exists in the map
-func (m mapRule) IsValid(value string) bool {
- _, ok := m[value]
- return ok
-}
-
-// whitelist is a generic rule for whitelisting
-type whitelist struct {
- rule
-}
-
-// IsValid for whitelist checks if the value is within the whitelist
-func (w whitelist) IsValid(value string) bool {
- return w.rule.IsValid(value)
-}
-
-// blacklist is a generic rule for blacklisting
-type blacklist struct {
- rule
-}
-
-// IsValid for blacklist checks if the value is not within the blacklist
-func (b blacklist) IsValid(value string) bool {
- return !b.rule.IsValid(value)
-}
-
-type patterns []string
-
-// IsValid for patterns checks each pattern and returns if a match has
-// been found
-func (p patterns) IsValid(value string) bool {
- for _, pattern := range p {
- if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
- return true
- }
- }
- return false
-}
-
-// inclusiveRules allows rules to depend on one another
-type inclusiveRules []rule
-
-// IsValid will return true if all rules are true
-func (r inclusiveRules) IsValid(value string) bool {
- for _, rule := range r {
- if !rule.IsValid(value) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
deleted file mode 100644
index 6aa2ed2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package v4
-
-// WithUnsignedPayload will set the signer's UnsignedPayload field to true.
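Because the rule types above are unexported they compose only inside package v4; a minimal sketch of combining them:

    // Accept an exact header name or an allowed prefix.
    hdrRules := rules{
        whitelist{mapRule{"Content-Type": struct{}{}}},
        patterns{"X-Amz-Meta-"},
    }
    hdrRules.IsValid("X-Amz-Meta-Foo") // true: prefix pattern matches
    hdrRules.IsValid("Authorization")  // false: no rule matches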
-func WithUnsignedPayload(v4 *Signer) {
- v4.UnsignedPayload = true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
deleted file mode 100644
index bd082e9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build go1.5
-
-package v4
-
-import (
- "net/url"
- "strings"
-)
-
-func getURIPath(u *url.URL) string {
- var uri string
-
- if len(u.Opaque) > 0 {
- uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
- } else {
- uri = u.EscapedPath()
- }
-
- if len(uri) == 0 {
- uri = "/"
- }
-
- return uri
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
deleted file mode 100644
index b7da95a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ /dev/null
@@ -1,761 +0,0 @@
-// Package v4 implements signing for AWS V4 signer
-//
-// Provides request signing for requests that need to be signed with
-// AWS V4 Signatures.
-//
-// Standalone Signer
-//
-// Generally using the signer outside of the SDK should not require any additional
-// logic when using Go v1.5 or higher. The signer does this by taking advantage
-// of the URL.EscapedPath method. If your request URI requires additional escaping
-// you may need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
-//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
-//
-// "//<hostname>/<path>"
-//
-// // e.g.
-// "//example.com/some/path"
-//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
-//
-// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
-// method and use the returned value. If you're using Go v1.4 you must set
-// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
-// Go v1.5 the signer will fall back to URL.Path.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of this escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query from which the signature was generated.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
-//
-// Because of this, it is recommended that when using the signer outside of the
-// SDK you explicitly escape the request prior to signing it. This will help
-// prevent signature validation errors. This can be done by setting
-// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
-//
-// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
-// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
-// request URL. https://github.com/golang/go/issues/16847 points to a bug in
-// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
-// message. URL.Opaque generally will force Go to make requests with absolute URL.
-// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
-// or url.EscapedPath will ignore the RawPath escaping.
-// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. -package v4 - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. -var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. 
If the logger
-// is nil, nothing will be logged.
- Logger aws.Logger
-
- // Disables the Signer's moving HTTP header key/value pairs from the HTTP
- // request header to the request's query string. This is most commonly used
- // with pre-signed requests preventing headers from being added to the
- // request's query string.
- DisableHeaderHoisting bool
-
- // Disables the automatic escaping of the URI path of the request for the
- // signature's canonical string's path. For services that do not need additional
- // escaping, use this to disable the signer escaping the path.
- //
- // S3 is an example of a service that does not need additional escaping.
- //
- // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- DisableURIPathEscaping bool
-
- // Disables the automatic setting of the HTTP request's Body field with the
- // io.ReadSeeker passed in to the signer. This is useful if you're using a
- // custom wrapper around the body for the io.ReadSeeker and want to preserve
- // the Body value on the Request.Body.
- //
- // This does run the risk of signing a request with a body that will not be
- // sent in the request. You need to ensure that the underlying data of the Body
- // values are the same.
- DisableRequestBodyOverwrite bool
-
- // currentTimeFn returns the time value which represents the current time.
- // This value should only be used for testing. If it is nil the default
- // time.Now will be used.
- currentTimeFn func() time.Time
-
- // UnsignedPayload will prevent signing of the payload. This will only
- // work for services that have support for this.
- UnsignedPayload bool
-}
-
-// NewSigner returns a Signer pointer configured with the credentials and optional
-// option values provided. If no options are provided the Signer will use its
-// default configuration.
-func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
- v4 := &Signer{
- Credentials: credentials,
- }
-
- for _, option := range options {
- option(v4)
- }
-
- return v4
-}
-
-type signingCtx struct {
- ServiceName string
- Region string
- Request *http.Request
- Body io.ReadSeeker
- Query url.Values
- Time time.Time
- ExpireTime time.Duration
- SignedHeaderVals http.Header
-
- DisableURIPathEscaping bool
-
- credValues credentials.Value
- isPresign bool
- formattedTime string
- formattedShortTime string
- unsignedPayload bool
-
- bodyDigest string
- signedHeaders string
- canonicalHeaders string
- canonicalString string
- credentialString string
- stringToSign string
- signature string
- authorization string
-}
-
-// Sign signs AWS v4 requests with the provided body, service name, region the
-// request is made to, and time the request is signed at. The signTime allows
-// you to specify that a request is signed for the future, and cannot be
-// used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. Generally for signed requests this value
-// is not needed as the full request context will be captured by the http.Request
-// value. It is included for reference though.
-//
-// Sign will set the request's Body to be the `body` parameter passed in. If
-// the body is not already an io.ReadCloser, it will be wrapped within one. If
-// a `nil` body parameter is passed to Sign, the request's Body field will
-// also be set to nil. It's important to note that this functionality will not
-// change the request's ContentLength.
-//
-// Sign differs from Presign in that it will sign the request using HTTP
-// header values. This type of signing is intended for http.Request values that
-// will not be shared, or are shared in a way the header values on the request
-// will not be lost.
-//
-// The request's body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
-	return v4.signWithBody(r, body, service, region, 0, signTime)
-}
-
-// Presign signs AWS v4 requests with the provided body, service name, region
-// the request is made to, and time the request is signed at. The signTime
-// allows you to specify that a request is signed for the future, and cannot
-// be used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. For presigned requests these headers
-// and their values must be included on the HTTP request when it is made. This
-// is helpful for knowing which header values need to be shared with the party
-// the presigned request will be distributed to.
-//
-// Presign differs from Sign in that it will sign the request using query string
-// values instead of header values. This allows you to share the Presigned Request's
-// URL with third parties, or distribute it throughout your system with minimal
-// dependencies.
-//
-// Presign also takes an exp value which is the duration the
-// signed request will be valid after the signing time. This allows you to
-// set when the request will expire.
-//
-// The request's body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-//
-// Presigning an S3 request will not compute the body's SHA256 hash by default.
-// This is because the general use case for S3 presigned URLs is to share
-// PUT/GET capabilities. If you would like to include the body's SHA256 in the
-// presigned request's signature you can set the "X-Amz-Content-Sha256"
-// HTTP header and that will be included in the request's signature.
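For readers unfamiliar with this API, here is a minimal sketch of driving the signer outside the SDK, as the package doc suggests. The bucket, key, and credentials are invented for illustration, and the import paths assume the vendored packages being removed in this patch:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Static credentials purely for illustration; real code would use a
	// provider chain or environment credentials.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	// Request to presign. With a nil body, S3 presigns use UNSIGNED-PAYLOAD
	// (see buildBodyDigest further down in this file).
	req, err := http.NewRequest("GET", "https://example-bucket.s3.amazonaws.com/example-key", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Presign for 15 minutes of validity; the signature is hoisted into the
	// query string, so the resulting URL is shareable as-is.
	headers, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(req.URL.String()) // URL now carries X-Amz-Signature et al.
	fmt.Println(headers)          // headers that must accompany the request
}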
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
-	return v4.signWithBody(r, body, service, region, exp, signTime)
-}
-
-func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
-	currentTimeFn := v4.currentTimeFn
-	if currentTimeFn == nil {
-		currentTimeFn = time.Now
-	}
-
-	ctx := &signingCtx{
-		Request:                r,
-		Body:                   body,
-		Query:                  r.URL.Query(),
-		Time:                   signTime,
-		ExpireTime:             exp,
-		isPresign:              exp != 0,
-		ServiceName:            service,
-		Region:                 region,
-		DisableURIPathEscaping: v4.DisableURIPathEscaping,
-		unsignedPayload:        v4.UnsignedPayload,
-	}
-
-	for key := range ctx.Query {
-		sort.Strings(ctx.Query[key])
-	}
-
-	if ctx.isRequestSigned() {
-		ctx.Time = currentTimeFn()
-		ctx.handlePresignRemoval()
-	}
-
-	var err error
-	ctx.credValues, err = v4.Credentials.Get()
-	if err != nil {
-		return http.Header{}, err
-	}
-
-	ctx.assignAmzQueryValues()
-	ctx.build(v4.DisableHeaderHoisting)
-
-	// If the request is not presigned the body should be attached to it. This
-	// prevents the confusion of wanting to send a signed request without
-	// the body the request was signed for attached.
-	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
-		var reader io.ReadCloser
-		if body != nil {
-			var ok bool
-			if reader, ok = body.(io.ReadCloser); !ok {
-				reader = ioutil.NopCloser(body)
-			}
-		}
-		r.Body = reader
-	}
-
-	if v4.Debug.Matches(aws.LogDebugWithSigning) {
-		v4.logSigningInfo(ctx)
-	}
-
-	return ctx.SignedHeaderVals, nil
-}
-
-func (ctx *signingCtx) handlePresignRemoval() {
-	if !ctx.isPresign {
-		return
-	}
-
-	// The credentials have expired for this request. The current signature
-	// is invalid, and the request needs to be re-signed or it will fail.
-	ctx.removePresign()
-
-	// Update the request's query string to ensure the values stay in
-	// sync in the case retrieving the new credentials fails.
-	ctx.Request.URL.RawQuery = ctx.Query.Encode()
-}
-
-func (ctx *signingCtx) assignAmzQueryValues() {
-	if ctx.isPresign {
-		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
-		if ctx.credValues.SessionToken != "" {
-			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
-		} else {
-			ctx.Query.Del("X-Amz-Security-Token")
-		}
-
-		return
-	}
-
-	if ctx.credValues.SessionToken != "" {
-		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
-	}
-}
-
-// SignRequestHandler is a named request handler the SDK will use to sign
-// service client requests with the V4 signature.
-var SignRequestHandler = request.NamedHandler{
-	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
-}
-
-// SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler should only be used with the SDK's built-in service client's
-// API operation requests.
-//
-// This function should not be used on its own, but in conjunction with
-// an AWS service client's API operation call. To sign a standalone request
-// not created by a service client's API operation method use the "Sign" or
-// "Presign" functions of the "Signer" type.
-//
-// If the credentials of the request's config are set to
-// credentials.AnonymousCredentials the request will not be signed.
-func SignSDKRequest(req *request.Request) {
-	signSDKRequestWithCurrTime(req, time.Now)
-}
-
-// BuildNamedHandler will build a generic handler for signing.
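BuildNamedHandler, declared just after this note, lets a client swap in a customized signer. A hedged sketch of how that wiring could look on an S3 client; the client construction is illustrative, and Remove/PushBackNamed are the handler-list operations this SDK's request package exposes:

package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Build a replacement sign handler whose Signer skips payload hashing.
	custom := v4.BuildNamedHandler("v4.CustomSignRequestHandler", func(s *v4.Signer) {
		s.UnsignedPayload = true
	})

	// Drop the stock V4 handler and install the custom one.
	svc.Handlers.Sign.Remove(v4.SignRequestHandler)
	svc.Handlers.Sign.PushBackNamed(custom)
}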
-func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now, opts...) - }, - } -} - -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. - v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildBodyDigest() - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, - } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) - - if 
ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host - } else { - headerValues[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - ctx.formattedTime, - ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { - hash = "UNSIGNED-PAYLOAD" - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) - } - 
if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -const doubleSpaces = " " - -var doubleSpaceBytes = []byte(doubleSpaces) - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) - - idx := strings.Index(trimmed, doubleSpaces) - if idx < 0 { - vals[i] = trimmed - continue - } - - buf := []byte(trimmed) - for idx > -1 { - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - idx - 1 - break - } - } - - if stripToIdx >= 0 { - // Find next double space - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx - } - } else { - idx = -1 - } - } - - vals[i] = string(buf) - } - return vals -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 0e2d864..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. -// -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 
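Circling back to buildSignature in v4.go above: the makeHmac chain is the standard SigV4 key derivation. A standalone sketch using only the standard library; the secret and scope values are invented:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Mirrors buildSignature: AWS4+secret -> date -> region -> service -> "aws4_request".
	secret := "EXAMPLEKEY"
	date := hmacSHA256([]byte("AWS4"+secret), []byte("20190612"))
	region := hmacSHA256(date, []byte("eu-west-1"))
	service := hmacSHA256(region, []byte("s3"))
	signingKey := hmacSHA256(service, []byte("aws4_request"))

	// The final signature is HMAC(signingKey, stringToSign), hex-encoded.
	stringToSign := "AWS4-HMAC-SHA256\n20190612T000000Z\n20190612/eu-west-1/s3/aws4_request\n<hash of canonical request>"
	fmt.Println(hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))))
}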
-// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index 6192b24..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 0210d27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. 
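As a quick usage sketch for the WriteAtBuffer above: out-of-order WriteAt calls grow the buffer to cover the furthest offset, which is what allows concurrent ranged downloads to land in any order (values invented):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	buf := aws.NewWriteAtBuffer([]byte{})
	buf.GrowthCoeff = 1.5 // over-allocate on growth to reduce re-copies

	// Chunks can arrive out of order, e.g. from parallel ranged GETs.
	buf.WriteAt([]byte("world"), 6)
	buf.WriteAt([]byte("hello,"), 0)

	fmt.Printf("%q\n", buf.Bytes()) // "hello,world"
}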
-// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index f8694c2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.10.4" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go deleted file mode 100644 index 2b279e6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package endpoints validates regional endpoints for services. -package endpoints - -//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go -//go:generate gofmt -s -w endpoints_map.go - -import ( - "fmt" - "regexp" - "strings" -) - -// NormalizeEndpoint takes and endpoint and service API information to return a -// normalized endpoint and signing region. If the endpoint is not an empty string -// the service name and region will be used to look up the service's API endpoint. -// If the endpoint is provided the scheme will be added if it is not present. 
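To make the lookup in EndpointForRegion (just below) concrete, here is a standalone sketch of the same most-specific-first key search over a toy endpoints map; the entries are a small subset of endpoints.json further down:

package main

import (
	"fmt"
	"strings"
)

func main() {
	endpoints := map[string]string{
		"*/*":          "{service}.{region}.amazonaws.com",
		"us-east-1/s3": "s3.amazonaws.com",
		"*/s3":         "s3-{region}.amazonaws.com",
	}

	svc, region := "s3", "eu-west-1"
	// Most specific key wins: region/service, region/*, */service, */*.
	for _, key := range []string{region + "/" + svc, region + "/*", "*/" + svc, "*/*"} {
		if ep, ok := endpoints[key]; ok {
			ep = strings.Replace(ep, "{region}", region, -1)
			ep = strings.Replace(ep, "{service}", svc, -1)
			fmt.Println(key, "=>", ep) // */s3 => s3-eu-west-1.amazonaws.com
			break
		}
	}
}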
-func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { - if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL) - } - - return AddScheme(endpoint, disableSSL), "" -} - -// EndpointForRegion returns an endpoint and its signing region for a service and region. -// if the service and region pair are not found endpoint and signingRegion will be empty. -func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { - derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", - } - - for _, key := range derivedKeys { - if val, ok := endpointsMap.Endpoints[key]; ok { - ep := val.Endpoint - ep = strings.Replace(ep, "{region}", region, -1) - ep = strings.Replace(ep, "{service}", svcName, -1) - - endpoint = ep - signingRegion = val.SigningRegion - break - } - } - - return AddScheme(endpoint, disableSSL), signingRegion -} - -// Regular expression to determine if the endpoint string is prefixed with a scheme. -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. -func AddScheme(endpoint string, disableSSL bool) string { - if endpoint != "" && !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index 5f4991c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git 
a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go deleted file mode 100644 index e995315..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ /dev/null @@ -1,88 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -type endpointStruct struct { - Version int - Endpoints map[string]endpointEntry -} - -type endpointEntry struct { - Endpoint string - SigningRegion string -} - -var endpointsMap = endpointStruct{ - Version: 2, - Endpoints: map[string]endpointEntry{ - "*/*": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "*/cloudfront": { - Endpoint: "cloudfront.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/cloudsearchdomain": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/data.iot": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "*/iam": { - Endpoint: "iam.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/importexport": { - Endpoint: "importexport.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/route53": { - Endpoint: "route53.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "*/sts": { - Endpoint: "sts.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/waf": { - Endpoint: "waf.amazonaws.com", - SigningRegion: "us-east-1", - }, - "cn-north-1/*": { - Endpoint: "{service}.{region}.amazonaws.com.cn", - }, - "cn-north-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "eu-central-1/s3": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "us-east-1/s3": { - Endpoint: "s3.amazonaws.com", - }, - "us-east-1/sdb": { - Endpoint: "sdb.amazonaws.com", - SigningRegion: "us-east-1", - }, - "us-gov-west-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "us-gov-west-1/iam": { - Endpoint: "iam.us-gov.amazonaws.com", - }, - "us-gov-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-gov-west-1/sts": { - Endpoint: "sts.us-gov-west-1.amazonaws.com", - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831df..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. 
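GetIdempotencyToken (below) is a thin wrapper over UUIDVersion4. As a standalone sketch, generating such a token with just the standard library looks like this, using the same version/variant bit masking:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	u := make([]byte, 16)
	if _, err := rand.Read(u); err != nil {
		panic(err)
	}

	// Force version 4 (13th hex digit) and the RFC 4122 variant (17th hex
	// digit), exactly as UUIDVersion4 does below.
	u[6] = (u[6] | 0x40) & 0x4F
	u[8] = (u[8] | 0x80) & 0xBF

	fmt.Printf("%X-%X-%X-%X-%X\n", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}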
-func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index 18169f0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. -func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) - return - } - - if r.ExpireTime == 0 { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 524ca95..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,237 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. 
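To illustrate what Parse (below) produces on the wire, here is a hedged sketch with an invented input shape tagged the way SDK models are; unflattened lists serialize as Name.member.N:

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// ListThingsInput is a hypothetical shape, not a real API model.
type ListThingsInput struct {
	MaxItems *int64    `type:"integer"`
	Names    []*string `type:"list"`
}

func main() {
	max := int64(10)
	a, b := "alpha", "beta"
	in := &ListThingsInput{MaxItems: &max, Names: []*string{&a, &b}}

	body := url.Values{"Action": {"ListThings"}, "Version": {"2019-06-12"}}
	if err := queryutil.Parse(body, in, false); err != nil {
		panic(err)
	}

	// Action=ListThings&MaxItems=10&Names.member.1=alpha&Names.member.2=beta&Version=2019-06-12
	fmt.Println(body.Encode())
}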
-func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." + name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. 
- mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index e0f4d5a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,35 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
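The companion error path in unmarshal_error.go (below) leans on encoding/xml's nested-element tags. A standalone sketch of that decoding, with an invented error payload:

package main

import (
	"encoding/xml"
	"fmt"
)

// Mirrors the xmlErrorResponse shape declared below.
type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	payload := `<ErrorResponse>
  <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
  <RequestId>example-request-id</RequestId>
</ErrorResponse>`

	var resp xmlErrorResponse
	if err := xml.Unmarshal([]byte(payload), &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, resp.Message, resp.RequestID)
}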
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index f214296..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,66 +0,0 @@ -package query - -import ( - "encoding/xml" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` -} - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) - return - } - - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 7161835..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,290 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New("SerializationError", - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) 
- if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - - } - - header.Add(prefix+key.String(), str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - var str string - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - str = value.UTC().Format(RFC822) - case aws.JSONValue: - b, err := json.Marshal(value) - if err != nil { - return "", err - } - if tag.Get("location") == "header" { - str = base64.StdEncoding.EncodeToString(b) - } else { - str = string(b) - } - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. 
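Since rest.EscapePath above is exported, its Amazon-style escaping can be checked directly; unlike net/url, it percent-encodes everything outside the unreserved set, optionally including '/'. A small sketch with invented path values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	// '/' is kept as a path separator when encodeSep is false...
	fmt.Println(rest.EscapePath("photos/2019 06/tag+name", false))
	// photos/2019%2006/tag%2Bname

	// ...and percent-encoded when encodeSep is true (e.g. for key names
	// embedded as a single path segment).
	fmt.Println(rest.EscapePath("photos/2019 06", true))
	// photos%2F2019%2006
}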
-func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". -func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 7a779ee..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,227 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. 
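unmarshalHeaderMap (a little further down) gathers prefixed headers into a map by stripping the locationName prefix. This standalone sketch mirrors that logic for S3-style X-Amz-Meta- metadata; the header values are invented:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	headers := http.Header{}
	headers.Set("X-Amz-Meta-Owner", "team-a")
	headers.Set("X-Amz-Meta-Env", "prod")
	headers.Set("Content-Type", "text/plain")

	prefix := "X-Amz-Meta-"
	out := map[string]string{}
	for k, v := range headers {
		k = http.CanonicalHeaderKey(k)
		if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
			out[k[len(prefix):]] = v[0] // key keeps canonical casing, minus the prefix
		}
	}
	fmt.Println(out) // map[Env:prod Owner:team-a]
}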
-func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) - } -} - -func unmarshalBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) - } - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) - } - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", - "failed to read response body", err) - return - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } -} - -func unmarshalLocationElements(r *request.Request, v reflect.Value) { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) - case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - } - } - if r.Error != nil { - return - } - } -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - k = 
http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { - out[k[len(prefix):]] = &v[0] - } - } - r.Set(reflect.ValueOf(out)) - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { - if len(header) == 0 { - return nil - } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - b := []byte(header) - var err error - if tag.Get("location") == "header" { - b, err = base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go deleted file mode 100644 index 7bdf4c8..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package restxml provides RESTful XML serialization of AWS -// requests and responses. -package restxml - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go - -import ( - "bytes" - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// BuildHandler is a named request handler for building restxml protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} - -// Build builds a request payload for the REST XML protocol. 
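Build (below) delegates the URI/header portion to rest.Build and then, for structure payloads, encodes the body with xmlutil.BuildXML. A hedged sketch of that body-encoding step with an invented shape; element order in the output may vary in this SDK vintage:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// CreateThingInput is a hypothetical shape tagged like SDK REST-XML models.
type CreateThingInput struct {
	_    struct{}  `type:"structure" locationName:"CreateThingRequest"`
	Name *string   `type:"string"`
	Tags []*string `type:"list" locationNameList:"Tag"`
}

func main() {
	name, tag := "example", "a"
	in := &CreateThingInput{Name: &name, Tags: []*string{&tag}}

	var buf bytes.Buffer
	if err := xmlutil.BuildXML(in, xml.NewEncoder(&buf)); err != nil {
		panic(err)
	}
	// e.g. <CreateThingRequest><Name>example</Name><Tags><Tag>a</Tag></Tags></CreateThingRequest>
	fmt.Println(buf.String())
}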
-func Build(r *request.Request) {
-	rest.Build(r)
-
-	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
-		var buf bytes.Buffer
-		err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
-		if err != nil {
-			r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
-			return
-		}
-		r.SetBufferBody(buf.Bytes())
-	}
-}
-
-// Unmarshal unmarshals a payload response for the REST XML protocol.
-func Unmarshal(r *request.Request) {
-	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
-		defer r.HTTPResponse.Body.Close()
-		decoder := xml.NewDecoder(r.HTTPResponse.Body)
-		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
-		if err != nil {
-			r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
-			return
-		}
-	} else {
-		rest.Unmarshal(r)
-	}
-}
-
-// UnmarshalMeta unmarshals response headers for the REST XML protocol.
-func UnmarshalMeta(r *request.Request) {
-	rest.UnmarshalMeta(r)
-}
-
-// UnmarshalError unmarshals a response error for the REST XML protocol.
-func UnmarshalError(r *request.Request) {
-	query.UnmarshalError(r)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
deleted file mode 100644
index da1a681..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package protocol
-
-import (
-	"io"
-	"io/ioutil"
-
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
-var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
-
-// UnmarshalDiscardBody is a request handler that empties a response's body and closes it.
-func UnmarshalDiscardBody(r *request.Request) {
-	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
-		return
-	}
-
-	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
-	r.HTTPResponse.Body.Close()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
deleted file mode 100644
index 7091b45..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Package xmlutil provides XML serialization of AWS requests and responses.
-package xmlutil
-
-import (
-	"encoding/base64"
-	"encoding/xml"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"time"
-
-	"github.com/aws/aws-sdk-go/private/protocol"
-)
-
-// BuildXML will serialize params into an xml.Encoder.
-// Error will be returned if the serialization of any of the params or nested values fails.
-func BuildXML(params interface{}, e *xml.Encoder) error {
-	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
-	root := NewXMLElement(xml.Name{})
-	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
-		return err
-	}
-	for _, c := range root.Children {
-		for _, v := range c {
-			return StructToXML(e, v, false)
-		}
-	}
-	return nil
-}
-
-// Returns the reflection element of a value, if it is a pointer.
-func elemOf(value reflect.Value) reflect.Value {
-	for value.Kind() == reflect.Ptr {
-		value = value.Elem()
-	}
-	return value
-}
-
-// An xmlBuilder serializes values from Go code to XML.
-type xmlBuilder struct {
-	encoder    *xml.Encoder
-	namespaces map[string]string
-}
-
-// buildValue is a generic XMLNode builder for any type. It builds the value
-// according to its specific type: struct, list, map, or scalar.
-//
-// It also takes a "type" tag value to set what type a value should be
-// converted to as an XMLNode. If the type is not provided, reflection is
-// used to determine the value's type.
-func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
-	value = elemOf(value)
-	if !value.IsValid() { // no need to handle zero values
-		return nil
-	} else if tag.Get("location") != "" { // don't handle non-body location values
-		return nil
-	}
-
-	t := tag.Get("type")
-	if t == "" {
-		switch value.Kind() {
-		case reflect.Struct:
-			t = "structure"
-		case reflect.Slice:
-			t = "list"
-		case reflect.Map:
-			t = "map"
-		}
-	}
-
-	switch t {
-	case "structure":
-		if field, ok := value.Type().FieldByName("_"); ok {
-			tag = tag + reflect.StructTag(" ") + field.Tag
-		}
-		return b.buildStruct(value, current, tag)
-	case "list":
-		return b.buildList(value, current, tag)
-	case "map":
-		return b.buildMap(value, current, tag)
-	default:
-		return b.buildScalar(value, current, tag)
-	}
-}
-
-// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
-// types are converted to XMLNodes also.
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
-	if !value.IsValid() {
-		return nil
-	}
-
-	fieldAdded := false
-
-	// unwrap payloads
-	if payload := tag.Get("payload"); payload != "" {
-		field, _ := value.Type().FieldByName(payload)
-		tag = field.Tag
-		value = elemOf(value.FieldByName(payload))
-
-		if !value.IsValid() {
-			return nil
-		}
-	}
-
-	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
-
-	// there is an xmlNamespace associated with this struct
-	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
-		ns := xml.Attr{
-			Name:  xml.Name{Local: "xmlns"},
-			Value: uri,
-		}
-		if prefix != "" {
-			b.namespaces[prefix] = uri // register the namespace
-			ns.Name.Local = "xmlns:" + prefix
-		}
-
-		child.Attr = append(child.Attr, ns)
-	}
-
-	t := value.Type()
-	for i := 0; i < value.NumField(); i++ {
-		member := elemOf(value.Field(i))
-		field := t.Field(i)
-
-		if field.PkgPath != "" {
-			continue // ignore unexported fields
-		}
-		if field.Tag.Get("ignore") != "" {
-			continue
-		}
-
-		mTag := field.Tag
-		if mTag.Get("location") != "" { // skip non-body members
-			continue
-		}
-
-		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
-			token := protocol.GetIdempotencyToken()
-			member = reflect.ValueOf(token)
-		}
-
-		memberName := mTag.Get("locationName")
-		if memberName == "" {
-			memberName = field.Name
-			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
-		}
-		if err := b.buildValue(member, child, mTag); err != nil {
-			return err
-		}
-
-		fieldAdded = true
-	}
-
-	if fieldAdded { // only append this child if we have one or more valid members
-		current.AddChild(child)
-	}
-
-	return nil
-}
-
-// buildList adds the value's list items to the current XMLNode as children nodes. All
-// nested values in the list are converted to XMLNodes also.
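-// Illustrative note (example XML, not from this file): for a []string field
-// tagged `locationName:"Items" locationNameList:"Item"` the non-flattened
-// form below emits
-//
-//	<Items><Item>a</Item><Item>b</Item></Items>
-//
-// while the `flattened` form emits a repeated <Items>a</Items><Items>b</Items>
-// with no wrapper element.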
-func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. -// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. 
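-// Illustrative note (examples assumed, not from this file): a time.Time value
-// is rendered as "2006-01-02T15:04:05Z" (ISO 8601 UTC), and a []byte value is
-// base64-encoded before being written as the node's text.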
-func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 8758462..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,260 +0,0 @@ -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" -) - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
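-// Illustrative note (example assumed, not from this file): given
-//
-//	<Person><FullName>Ann</FullName></Person>
-//
-// and a field FullName *string `locationName:"FullName"`, parseStruct looks up
-// the child nodes by locationName (falling back to the Go field name) and
-// recurses into parse for each matching element.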
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
-	t := r.Type()
-	if r.Kind() == reflect.Ptr {
-		if r.IsNil() { // create the structure if it's nil
-			s := reflect.New(r.Type().Elem())
-			r.Set(s)
-			r = s
-		}
-
-		r = r.Elem()
-		t = t.Elem()
-	}
-
-	// unwrap any payloads
-	if payload := tag.Get("payload"); payload != "" {
-		field, _ := t.FieldByName(payload)
-		return parseStruct(r.FieldByName(payload), node, field.Tag)
-	}
-
-	for i := 0; i < t.NumField(); i++ {
-		field := t.Field(i)
-		if c := field.Name[0:1]; strings.ToLower(c) == c {
-			continue // ignore unexported fields
-		}
-
-		// figure out what this field is called
-		name := field.Name
-		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
-			name = field.Tag.Get("locationNameList")
-		} else if locName := field.Tag.Get("locationName"); locName != "" {
-			name = locName
-		}
-
-		// try to find the field by name in elements
-		elems := node.Children[name]
-
-		if elems == nil { // try to find the field in attributes
-			if val, ok := node.findElem(name); ok {
-				elems = []*XMLNode{{Text: val}}
-			}
-		}
-
-		member := r.FieldByName(field.Name)
-		for _, elem := range elems {
-			err := parse(member, elem, field.Tag)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// parseList deserializes a list of values from an XML node. Each list entry
-// will also be deserialized.
-func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
-	t := r.Type()
-
-	if tag.Get("flattened") == "" { // look at all item entries
-		mname := "member"
-		if name := tag.Get("locationNameList"); name != "" {
-			mname = name
-		}
-
-		if Children, ok := node.Children[mname]; ok {
-			if r.IsNil() {
-				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
-			}
-
-			for i, c := range Children {
-				err := parse(r.Index(i), c, "")
-				if err != nil {
-					return err
-				}
-			}
-		}
-	} else { // flattened list means this is a single element
-		if r.IsNil() {
-			r.Set(reflect.MakeSlice(t, 0, 0))
-		}
-
-		childR := reflect.Zero(t.Elem())
-		r.Set(reflect.Append(r, childR))
-		err := parse(r.Index(r.Len()-1), node, "")
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
-// will also be deserialized as map entries.
-func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
-	if r.IsNil() {
-		r.Set(reflect.MakeMap(r.Type()))
-	}
-
-	if tag.Get("flattened") == "" { // look at all child entries
-		for _, entry := range node.Children["entry"] {
-			if err := parseMapEntry(r, entry, tag); err != nil {
-				return err
-			}
-		}
-	} else { // this element is itself an entry
-		if err := parseMapEntry(r, node, tag); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// parseMapEntry deserializes a map entry from an XML node.
-func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
-	kname, vname := "key", "value"
-	if n := tag.Get("locationNameKey"); n != "" {
-		kname = n
-	}
-	if n := tag.Get("locationNameValue"); n != "" {
-		vname = n
-	}
-
-	keys, ok := node.Children[kname]
-	values := node.Children[vname]
-	if ok {
-		for i, key := range keys {
-			keyR := reflect.ValueOf(key.Text)
-			value := values[i]
-			valueR := reflect.New(r.Type().Elem()).Elem()
-
-			// propagate parse errors instead of silently dropping them
-			if err := parse(valueR, value, ""); err != nil {
-				return err
-			}
-			r.SetMapIndex(keyR, valueR)
-		}
-	}
-	return nil
-}
-
-// parseScalar deserializes an XMLNode value into a concrete type based on the
-// interface type of r.
-//
-// Error is returned if the deserialization fails due to invalid type conversion,
-// or unsupported interface type.
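-// Illustrative note (example assumed, not from this file): only pointer
-// scalars (*string, *bool, *int64, *float64, *time.Time) and []byte are
-// supported below; e.g. <Count>3</Count> decodes into an *int64 field via
-// strconv.ParseInt(node.Text, 10, 64).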
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 3e970b6..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,147 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
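-// Illustrative note (example assumed, not from this file): decoding
-//
-//	<A><B>x</B><B>y</B></A>
-//
-// with XMLToStruct(decoder, nil) yields a root whose Children["A"][0] has
-// Children["B"] holding two nodes with Text "x" and "y".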
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go deleted file mode 100644 index 244c86d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// 
IsValid for whitelist checks if the value is within the whitelist.
-func (w whitelist) IsValid(value string) bool {
-	return w.rule.IsValid(value)
-}
-
-// blacklist is a generic rule for blacklisting
-type blacklist struct {
-	rule
-}
-
-// IsValid for blacklist checks if the value is not within the blacklist
-func (b blacklist) IsValid(value string) bool {
-	return !b.rule.IsValid(value)
-}
-
-type patterns []string
-
-// IsValid for patterns checks each pattern and returns whether a match has
-// been found
-func (p patterns) IsValid(value string) bool {
-	for _, pattern := range p {
-		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
-			return true
-		}
-	}
-	return false
-}
-
-// inclusiveRules allows rules to depend on one another
-type inclusiveRules []rule
-
-// IsValid will return true if all rules are true
-func (r inclusiveRules) IsValid(value string) bool {
-	for _, rule := range r {
-		if !rule.IsValid(value) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
deleted file mode 100644
index 4765800..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Package v4 implements the AWS Signature Version 4 request signer.
-package v4
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-const (
-	authHeaderPrefix = "AWS4-HMAC-SHA256"
-	timeFormat       = "20060102T150405Z"
-	shortTimeFormat  = "20060102"
-)
-
-var ignoredHeaders = rules{
-	blacklist{
-		mapRule{
-			"Authorization": struct{}{},
-			"User-Agent":    struct{}{},
-		},
-	},
-}
-
-// requiredSignedHeaders is a whitelist used to build the canonical headers.
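-// Illustrative note (not from this file): this whitelist is also blacklisted
-// inside allowedQueryHoisting below, so a header may be hoisted into a
-// presigned URL's query string only if it is not required to be signed and it
-// matches the X-Amz- prefix.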
-var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string - notHoist bool - signedHeaderVals http.Header -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *request.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. 
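-	// Illustrative note (example assumed, not from this file): the credential
-	// scope built below has the form "<yyyymmdd>/<region>/<service>/aws4_request",
-	// e.g. "20150830/us-east-1/s3/aws4_request".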
-	if req.Config.Credentials == credentials.AnonymousCredentials {
-		return
-	}
-
-	region := req.ClientInfo.SigningRegion
-	if region == "" {
-		region = aws.StringValue(req.Config.Region)
-	}
-
-	name := req.ClientInfo.SigningName
-	if name == "" {
-		name = req.ClientInfo.ServiceName
-	}
-
-	s := signer{
-		Request:     req.HTTPRequest,
-		Time:        req.Time,
-		ExpireTime:  req.ExpireTime,
-		Query:       req.HTTPRequest.URL.Query(),
-		Body:        req.Body,
-		ServiceName: name,
-		Region:      region,
-		Credentials: req.Config.Credentials,
-		Debug:       req.Config.LogLevel.Value(),
-		Logger:      req.Config.Logger,
-		notHoist:    req.NotHoist,
-	}
-
-	req.Error = s.sign()
-	req.Time = s.Time
-	req.SignedHeaderVals = s.signedHeaderVals
-}
-
-func (v4 *signer) sign() error {
-	if v4.ExpireTime != 0 {
-		v4.isPresign = true
-	}
-
-	if v4.isRequestSigned() {
-		if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) {
-			// If the request is already signed, and the credentials have not
-			// expired, and the request is not too old, ignore the signing request.
-			return nil
-		}
-		v4.Time = time.Now()
-
-		// The credentials have expired for this request. The current signature
-		// is invalid, and the request needs to be re-signed or it will fail.
-		if v4.isPresign {
-			v4.removePresign()
-			// Update the request's query string to ensure the values stay in
-			// sync in the case retrieving the new credentials fails.
-			v4.Request.URL.RawQuery = v4.Query.Encode()
-		}
-	}
-
-	var err error
-	v4.CredValues, err = v4.Credentials.Get()
-	if err != nil {
-		return err
-	}
-
-	if v4.isPresign {
-		v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
-		if v4.CredValues.SessionToken != "" {
-			v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
-		} else {
-			v4.Query.Del("X-Amz-Security-Token")
-		}
-	} else if v4.CredValues.SessionToken != "" {
-		v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
-	}
-
-	v4.build()
-
-	if v4.Debug.Matches(aws.LogDebugWithSigning) {
-		v4.logSigningInfo()
-	}
-
-	return nil
-}
-
-const logSignInfoMsg = `DEBUG: Request Signature:
----[ CANONICAL STRING ]-----------------------------
-%s
----[ STRING TO SIGN ]--------------------------------
-%s%s
------------------------------------------------------`
-const logSignedURLMsg = `
----[ SIGNED URL ]------------------------------------
-%s`
-
-func (v4 *signer) logSigningInfo() {
-	signedURLMsg := ""
-	if v4.isPresign {
-		signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
-	}
-	msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
-	v4.Logger.Log(msg)
-}
-
-func (v4 *signer) build() {
-
-	v4.buildTime()             // no depends
-	v4.buildCredentialString() // no depends
-
-	unsignedHeaders := v4.Request.Header
-	if v4.isPresign {
-		if !v4.notHoist {
-			urlValues := url.Values{}
-			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
-			for k := range urlValues {
-				v4.Query[k] = urlValues[k]
-			}
-		}
-	}
-
-	v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
-	v4.buildCanonicalString() // depends on canon headers / signed headers
-	v4.buildStringToSign()    // depends on canon string
-	v4.buildSignature()       // depends on string to sign
-
-	if v4.isPresign {
-		v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
-	} else {
-		parts := []string{
-			authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
-			"SignedHeaders=" + v4.signedHeaders,
-			"Signature=" + v4.signature,
-		}
-
v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if v4.signedHeaderVals == nil { - v4.signedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok { - // include additional values - v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - v4.signedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.signedHeaderVals[k], ",") - } - } - - v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && 
v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - stripped := "" - found := false - str = strings.TrimSpace(str) - for _, c := range str { - if !found && c == ' ' { - stripped += string(c) - found = true - } else if c != ' ' { - stripped += string(c) - found = false - } - } - vals[i] = stripped - } - return vals -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go deleted file mode 100644 index b51e944..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ /dev/null @@ -1,134 +0,0 @@ -package waiter - -import ( - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides a collection of configuration values to setup a generated -// waiter code with. -type Config struct { - Name string - Delay int - MaxAttempts int - Operation string - Acceptors []WaitAcceptor -} - -// A WaitAcceptor provides the information needed to wait for an API operation -// to complete. -type WaitAcceptor struct { - Expected interface{} - Matcher string - State string - Argument string -} - -// A Waiter provides waiting for an operation to complete. -type Waiter struct { - Config - Client interface{} - Input interface{} -} - -// Wait waits for an operation to complete, expire max attempts, or fail. Error -// is returned if the operation fails. 
-func (w *Waiter) Wait() error { - client := reflect.ValueOf(w.Client) - in := reflect.ValueOf(w.Input) - method := client.MethodByName(w.Config.Operation + "Request") - - for i := 0; i < w.MaxAttempts; i++ { - res := method.Call([]reflect.Value{in}) - req := res[0].Interface().(*request.Request) - req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) - - err := req.Send() - for _, a := range w.Acceptors { - result := false - var vals []interface{} - switch a.Matcher { - case "pathAll", "path": - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case "pathAny": - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case "status": - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case "error": - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - case "pathList": - // ignored matcher - default: - logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", - w.Config.Operation, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - continue - } - - switch a.State { - case "success": - // waiter completed - return nil - case "failure": - // Waiter failure state triggered - return awserr.New("ResourceNotReady", - fmt.Sprintf("failed waiting for successful resource state"), err) - case "retry": - // clear the error and retry the operation - err = nil - default: - logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", - w.Config.Operation, a.State) - } - } - if err != nil { - return err - } - - time.Sleep(time.Second * time.Duration(w.Delay)) - } - - return awserr.New("ResourceNotReady", - fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) -} - -func logf(client reflect.Value, msg string, args ...interface{}) { - cfgVal := client.FieldByName("Config") - if !cfgVal.IsValid() { - return - } - if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { - cfg.Logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go deleted file mode 100644 index 0435398..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ /dev/null @@ -1,19245 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -import ( - "fmt" - "io" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -const opAbortMultipartUpload = "AbortMultipartUpload" - -// AbortMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See AbortMultipartUpload for usage and error information. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the AbortMultipartUploadRequest method. -// req, resp := client.AbortMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { - op := &request.Operation{ - Name: opAbortMultipartUpload, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &AbortMultipartUploadInput{} - } - - output = &AbortMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// AbortMultipartUpload API operation for Amazon Simple Storage Service. -// -// Aborts a multipart upload. -// -// To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation AbortMultipartUpload for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchUpload "NoSuchUpload" -// The specified multipart upload does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - return out, req.Send() -} - -// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See AbortMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCompleteMultipartUpload = "CompleteMultipartUpload" - -// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See CompleteMultipartUpload for usage and error information. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CompleteMultipartUploadRequest method. -// req, resp := client.CompleteMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { - op := &request.Operation{ - Name: opCompleteMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CompleteMultipartUploadInput{} - } - - output = &CompleteMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CompleteMultipartUpload API operation for Amazon Simple Storage Service. -// -// Completes a multipart upload by assembling previously uploaded parts. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CompleteMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - return out, req.Send() -} - -// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CompleteMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCopyObject = "CopyObject" - -// CopyObjectRequest generates a "aws/request.Request" representing the -// client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See CopyObject for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the CopyObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CopyObjectRequest method. -// req, resp := client.CopyObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { - op := &request.Operation{ - Name: opCopyObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CopyObjectInput{} - } - - output = &CopyObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// CopyObject API operation for Amazon Simple Storage Service. -// -// Creates a copy of an object that is already stored in Amazon S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CopyObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon Glacier. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - return out, req.Send() -} - -// CopyObjectWithContext is the same as CopyObject with the addition of -// the ability to pass a context and additional request options. -// -// See CopyObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateBucket = "CreateBucket" - -// CreateBucketRequest generates a "aws/request.Request" representing the -// client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See CreateBucket for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CreateBucketRequest method. 
-// req, resp := client.CreateBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { - op := &request.Operation{ - Name: opCreateBucket, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &CreateBucketInput{} - } - - output = &CreateBucketOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBucket API operation for Amazon Simple Storage Service. -// -// Creates a new bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateBucket for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" -// The requested bucket name is not available. The bucket namespace is shared -// by all users of the system. Please select a different name and try again. -// -// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - return out, req.Send() -} - -// CreateBucketWithContext is the same as CreateBucket with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateMultipartUpload = "CreateMultipartUpload" - -// CreateMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See CreateMultipartUpload for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CreateMultipartUploadRequest method. 
-// req, resp := client.CreateMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { - op := &request.Operation{ - Name: opCreateMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?uploads", - } - - if input == nil { - input = &CreateMultipartUploadInput{} - } - - output = &CreateMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateMultipartUpload API operation for Amazon Simple Storage Service. -// -// Initiates a multipart upload and returns an upload ID. -// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - return out, req.Send() -} - -// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CreateMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucket = "DeleteBucket" - -// DeleteBucketRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucket for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketRequest method. 
-// req, resp := client.DeleteBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { - op := &request.Operation{ - Name: opDeleteBucket, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &DeleteBucketInput{} - } - - output = &DeleteBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucket API operation for Amazon Simple Storage Service. -// -// Deletes the bucket. All objects (including all object versions and Delete -// Markers) in the bucket must be deleted before the bucket itself can be deleted. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucket for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - return out, req.Send() -} - -// DeleteBucketWithContext is the same as DeleteBucket with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" - -// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketAnalyticsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
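// The request/Send split is what enables the handler and pre-send access
// described above; a sketch, assuming an *s3.S3 client named svc, a log
// import, and a purely hypothetical trace header:
//
//	req, _ := svc.DeleteBucketAnalyticsConfigurationRequest(&s3.DeleteBucketAnalyticsConfigurationInput{
//		Bucket: aws.String("example-bucket"),       // placeholder
//		Id:     aws.String("example-analytics-id"), // placeholder
//	})
//	req.HTTPRequest.Header.Set("X-Example-Trace", "1") // modify before sending
//	if err := req.Send(); err != nil {
//		log.Println(err)
//	}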
-// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketAnalyticsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &DeleteBucketAnalyticsConfigurationInput{} - } - - output = &DeleteBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketCors = "DeleteBucketCors" - -// DeleteBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketCors for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketCorsRequest method. -// req, resp := client.DeleteBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { - op := &request.Operation{ - Name: opDeleteBucketCors, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &DeleteBucketCorsInput{} - } - - output = &DeleteBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketCors API operation for Amazon Simple Storage Service. -// -// Deletes the cors configuration information set for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - return out, req.Send() -} - -// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" - -// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketInventoryConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketInventoryConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
-// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketInventoryConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &DeleteBucketInventoryConfigurationInput{} - } - - output = &DeleteBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an inventory configuration (identified by the inventory ID) from -// the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" - -// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketLifecycle for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketLifecycleRequest method. -// req, resp := client.DeleteBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { - op := &request.Operation{ - Name: opDeleteBucketLifecycle, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &DeleteBucketLifecycleInput{} - } - - output = &DeleteBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. -// -// Deletes the lifecycle configuration from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - return out, req.Send() -} - -// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" - -// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketMetricsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketMetricsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
-// -// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. -// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketMetricsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &DeleteBucketMetricsConfigurationInput{} - } - - output = &DeleteBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes a metrics configuration (specified by the metrics configuration ID) -// from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketPolicy = "DeleteBucketPolicy" - -// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketPolicy for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketPolicy method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketPolicyRequest method. -// req, resp := client.DeleteBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { - op := &request.Operation{ - Name: opDeleteBucketPolicy, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &DeleteBucketPolicyInput{} - } - - output = &DeleteBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketPolicy API operation for Amazon Simple Storage Service. -// -// Deletes the policy from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy -func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - return out, req.Send() -} - -// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketReplication = "DeleteBucketReplication" - -// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketReplication for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketReplication method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketReplicationRequest method. 
-// req, resp := client.DeleteBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication -func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { - op := &request.Operation{ - Name: opDeleteBucketReplication, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &DeleteBucketReplicationInput{} - } - - output = &DeleteBucketReplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketReplication API operation for Amazon Simple Storage Service. -// -// Deletes the replication configuration from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication -func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - return out, req.Send() -} - -// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketTagging = "DeleteBucketTagging" - -// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketTaggingRequest method. 
-// req, resp := client.DeleteBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { - op := &request.Operation{ - Name: opDeleteBucketTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &DeleteBucketTaggingInput{} - } - - output = &DeleteBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketTagging API operation for Amazon Simple Storage Service. -// -// Deletes the tags from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - return out, req.Send() -} - -// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketWebsite = "DeleteBucketWebsite" - -// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteBucketWebsite for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketWebsiteRequest method. 
-// req, resp := client.DeleteBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { - op := &request.Operation{ - Name: opDeleteBucketWebsite, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &DeleteBucketWebsiteInput{} - } - - output = &DeleteBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketWebsite API operation for Amazon Simple Storage Service. -// -// This operation removes the website configuration from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - return out, req.Send() -} - -// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteObject for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteObjectRequest method. 
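// On a versioned bucket, DeleteObject inserts a delete marker rather than
// removing data (see the operation notes below); a sketch that checks for
// that, assuming svc, an fmt import, and placeholder names:
//
//	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
//		Bucket: aws.String("example-bucket"),
//		Key:    aws.String("example-key"),
//	})
//	if err == nil && aws.BoolValue(out.DeleteMarker) {
//		fmt.Println("delete marker created, version:", aws.StringValue(out.VersionId))
//	}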
-// req, resp := client.DeleteObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { - op := &request.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - output = &DeleteObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObject API operation for Amazon Simple Storage Service. -// -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - return out, req.Send() -} - -// DeleteObjectWithContext is the same as DeleteObject with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObjectTagging = "DeleteObjectTagging" - -// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteObjectTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjectTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteObjectTaggingRequest method. 
-// req, resp := client.DeleteObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging -func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { - op := &request.Operation{ - Name: opDeleteObjectTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &DeleteObjectTaggingInput{} - } - - output = &DeleteObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObjectTagging API operation for Amazon Simple Storage Service. -// -// Removes the tag-set from an existing object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging -func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { - req, out := c.DeleteObjectTaggingRequest(input) - return out, req.Send() -} - -// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { - req, out := c.DeleteObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObjects = "DeleteObjects" - -// DeleteObjectsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DeleteObjects for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjects method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteObjectsRequest method. 
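// A sketch of the batched form (the operation notes below cap a request at
// 1000 keys), assuming svc and placeholder key names:
//
//	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
//		Bucket: aws.String("example-bucket"),
//		Delete: &s3.Delete{
//			Objects: []*s3.ObjectIdentifier{
//				{Key: aws.String("a.txt")},
//				{Key: aws.String("b.txt")},
//			},
//			Quiet: aws.Bool(false), // list every result, not just failures
//		},
//	})
//	if err == nil {
//		fmt.Println(len(out.Deleted), "objects deleted,", len(out.Errors), "errors")
//	}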
-// req, resp := client.DeleteObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects -func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { - op := &request.Operation{ - Name: opDeleteObjects, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}?delete", - } - - if input == nil { - input = &DeleteObjectsInput{} - } - - output = &DeleteObjectsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObjects API operation for Amazon Simple Storage Service. -// -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObjects for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - return out, req.Send() -} - -// DeleteObjectsWithContext is the same as DeleteObjects with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" - -// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketAccelerateConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
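// A sketch of reading the transfer-acceleration state this operation returns,
// assuming svc and a placeholder bucket:
//
//	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
//		Bucket: aws.String("example-bucket"),
//	})
//	if err == nil {
//		fmt.Println("accelerate status:", aws.StringValue(out.Status)) // e.g. "Enabled" or "Suspended"
//	}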
-// req, resp := client.GetBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAccelerateConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &GetBucketAccelerateConfigurationInput{} - } - - output = &GetBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// Returns the accelerate configuration of a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAcl = "GetBucketAcl" - -// GetBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketAcl for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketAclRequest method. 
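// A sketch that walks the access control grants this operation returns,
// assuming svc and a placeholder bucket:
//
//	out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
//		Bucket: aws.String("example-bucket"),
//	})
//	if err == nil {
//		for _, g := range out.Grants {
//			fmt.Println(aws.StringValue(g.Permission), "granted to", g.Grantee.String())
//		}
//	}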
-// req, resp := client.GetBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { - op := &request.Operation{ - Name: opGetBucketAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &GetBucketAclInput{} - } - - output = &GetBucketAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAcl API operation for Amazon Simple Storage Service. -// -// Gets the access control policy for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - return out, req.Send() -} - -// GetBucketAclWithContext is the same as GetBucketAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" - -// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketAnalyticsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAnalyticsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
-// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAnalyticsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &GetBucketAnalyticsConfigurationInput{} - } - - output = &GetBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketCors = "GetBucketCors" - -// GetBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketCors for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketCorsRequest method. 
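// A sketch of listing the CORS rules this operation returns, assuming svc and
// a placeholder bucket:
//
//	out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
//		Bucket: aws.String("example-bucket"),
//	})
//	if err == nil {
//		for _, rule := range out.CORSRules {
//			fmt.Println("allowed methods:", aws.StringValueSlice(rule.AllowedMethods))
//		}
//	}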
-// req, resp := client.GetBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { - op := &request.Operation{ - Name: opGetBucketCors, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &GetBucketCorsInput{} - } - - output = &GetBucketCorsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketCors API operation for Amazon Simple Storage Service. -// -// Returns the cors configuration for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - return out, req.Send() -} - -// GetBucketCorsWithContext is the same as GetBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" - -// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketInventoryConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketInventoryConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
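// A sketch of fetching one inventory configuration by its ID, assuming svc
// and placeholder names:
//
//	out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
//		Bucket: aws.String("example-bucket"),
//		Id:     aws.String("example-inventory-id"),
//	})
//	if err == nil {
//		fmt.Println(out.InventoryConfiguration)
//	}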
-// req, resp := client.GetBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketInventoryConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &GetBucketInventoryConfigurationInput{} - } - - output = &GetBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycle = "GetBucketLifecycle" - -// GetBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketLifecycle for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLifecycleRequest method. 
-// req, resp := client.GetBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleInput{} - } - - output = &GetBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycle API operation for Amazon Simple Storage Service. -// -// Deprecated; see the GetBucketLifecycleConfiguration operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" - -// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketLifecycleConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
-// req, resp := client.GetBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketLifecycleConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleConfigurationInput{} - } - - output = &GetBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. -// -// Returns the lifecycle configuration information set on the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycleConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLocation = "GetBucketLocation" - -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketLocation for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLocationRequest method. 
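-// Setup sketch for the example below; the session and bucket name are
-// illustrative assumptions (aws, session, and s3 packages assumed
-// imported). Note that for buckets in us-east-1 the returned
-// LocationConstraint field is the empty string:
-//
-// client := s3.New(session.Must(session.NewSession()))
-// params := &s3.GetBucketLocationInput{
-//     Bucket: aws.String("example-bucket"), // assumed bucket name
-// }
-//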
-// req, resp := client.GetBucketLocationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { - op := &request.Operation{ - Name: opGetBucketLocation, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", - } - - if input == nil { - input = &GetBucketLocationInput{} - } - - output = &GetBucketLocationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLocation API operation for Amazon Simple Storage Service. -// -// Returns the region the bucket resides in. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLocation for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - return out, req.Send() -} - -// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLocation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLogging = "GetBucketLogging" - -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketLogging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLoggingRequest method. 
-// req, resp := client.GetBucketLoggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { - op := &request.Operation{ - Name: opGetBucketLogging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &GetBucketLoggingInput{} - } - - output = &GetBucketLoggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLogging API operation for Amazon Simple Storage Service. -// -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - return out, req.Send() -} - -// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" - -// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketMetricsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketMetricsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
-// req, resp := client.GetBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketMetricsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &GetBucketMetricsConfigurationInput{} - } - - output = &GetBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotification = "GetBucketNotification" - -// GetBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketNotification for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketNotificationRequest method. 
-// req, resp := client.GetBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketNotification, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfigurationDeprecated{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotification API operation for Amazon Simple Storage Service. -// -// Deprecated; see the GetBucketNotificationConfiguration operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" - -// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketNotificationConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketNotificationConfigurationRequest method.
-// req, resp := client.GetBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { - op := &request.Operation{ - Name: opGetBucketNotificationConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfiguration{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Returns the notification configuration of a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketPolicy = "GetBucketPolicy" - -// GetBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketPolicy for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketPolicyRequest method. 
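-// Setup sketch for the example below; the names are illustrative
-// assumptions (aws, session, and s3 packages assumed imported). On
-// success the policy document is available as a JSON string via the
-// output's Policy field:
-//
-// client := s3.New(session.Must(session.NewSession()))
-// params := &s3.GetBucketPolicyInput{
-//     Bucket: aws.String("example-bucket"), // assumed bucket name
-// }
-//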
-// req, resp := client.GetBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { - op := &request.Operation{ - Name: opGetBucketPolicy, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &GetBucketPolicyInput{} - } - - output = &GetBucketPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketPolicy API operation for Amazon Simple Storage Service. -// -// Returns the policy of a specified bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - return out, req.Send() -} - -// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketReplication = "GetBucketReplication" - -// GetBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketReplication for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketReplicationRequest method. 
-// req, resp := client.GetBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication -func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { - op := &request.Operation{ - Name: opGetBucketReplication, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &GetBucketReplicationInput{} - } - - output = &GetBucketReplicationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketReplication API operation for Amazon Simple Storage Service. -// -// Returns the replication configuration of a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication -func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - return out, req.Send() -} - -// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketRequestPayment = "GetBucketRequestPayment" - -// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketRequestPayment for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketRequestPaymentRequest method. 
-// req, resp := client.GetBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opGetBucketRequestPayment, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &GetBucketRequestPaymentInput{} - } - - output = &GetBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Returns the request payment configuration of a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketRequestPayment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketTagging = "GetBucketTagging" - -// GetBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketTaggingRequest method. 
-// req, resp := client.GetBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { - op := &request.Operation{ - Name: opGetBucketTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &GetBucketTaggingInput{} - } - - output = &GetBucketTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketTagging API operation for Amazon Simple Storage Service. -// -// Returns the tag set associated with the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - return out, req.Send() -} - -// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketVersioning = "GetBucketVersioning" - -// GetBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketVersioning for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketVersioningRequest method. 
-// req, resp := client.GetBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { - op := &request.Operation{ - Name: opGetBucketVersioning, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &GetBucketVersioningInput{} - } - - output = &GetBucketVersioningOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketVersioning API operation for Amazon Simple Storage Service. -// -// Returns the versioning state of a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - return out, req.Send() -} - -// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketWebsite = "GetBucketWebsite" - -// GetBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetBucketWebsite for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketWebsiteRequest method. 
-// req, resp := client.GetBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite -func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { - op := &request.Operation{ - Name: opGetBucketWebsite, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &GetBucketWebsiteInput{} - } - - output = &GetBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketWebsite API operation for Amazon Simple Storage Service. -// -// Returns the website configuration for a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - return out, req.Send() -} - -// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetObject for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectRequest method. 
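-// Setup sketch for the example below; the session, bucket, and key are
-// illustrative assumptions (aws, session, and s3 packages assumed
-// imported). On success the output's Body field is an io.ReadCloser that
-// the caller is responsible for reading and closing:
-//
-// client := s3.New(session.Must(session.NewSession()))
-// params := &s3.GetObjectInput{
-//     Bucket: aws.String("example-bucket"), // assumed bucket name
-//     Key:    aws.String("example-key"),    // assumed object key
-// }
-//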
-// req, resp := client.GetObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { - op := &request.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - output = &GetObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObject API operation for Amazon Simple Storage Service. -// -// Retrieves objects from Amazon S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject -func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - return out, req.Send() -} - -// GetObjectWithContext is the same as GetObject with the addition of -// the ability to pass a context and additional request options. -// -// See GetObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectAcl = "GetObjectAcl" - -// GetObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetObjectAcl for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectAclRequest method. 
-// req, resp := client.GetObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { - op := &request.Operation{ - Name: opGetObjectAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &GetObjectAclInput{} - } - - output = &GetObjectAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectAcl API operation for Amazon Simple Storage Service. -// -// Returns the access control list (ACL) of an object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectAcl for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - return out, req.Send() -} - -// GetObjectAclWithContext is the same as GetObjectAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectTagging = "GetObjectTagging" - -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetObjectTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectTaggingRequest method. 
-// req, resp := client.GetObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { - op := &request.Operation{ - Name: opGetObjectTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &GetObjectTaggingInput{} - } - - output = &GetObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectTagging API operation for Amazon Simple Storage Service. -// -// Returns the tag-set of an object. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) - return out, req.Send() -} - -// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectTorrent = "GetObjectTorrent" - -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetObjectTorrent for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectTorrentRequest method. 
-// req, resp := client.GetObjectTorrentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { - op := &request.Operation{ - Name: opGetObjectTorrent, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", - } - - if input == nil { - input = &GetObjectTorrentInput{} - } - - output = &GetObjectTorrentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectTorrent API operation for Amazon Simple Storage Service. -// -// Returns torrent files from a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTorrent for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - return out, req.Send() -} - -// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectTorrent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadBucket = "HeadBucket" - -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See HeadBucket for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the HeadBucketRequest method.
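-// Setup sketch for the example below, together with the error-inspection
-// pattern the documentation in this file refers to; all names are
-// illustrative assumptions (aws, awserr, session, and s3 packages assumed
-// imported):
-//
-// client := s3.New(session.Must(session.NewSession()))
-// params := &s3.HeadBucketInput{
-//     Bucket: aws.String("example-bucket"), // assumed bucket name
-// }
-//
-// After Send, a missing bucket can be distinguished from other failures:
-//
-// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
-//     // bucket does not exist
-// }
-//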
-// req, resp := client.HeadBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { - op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &HeadBucketInput{} - } - - output = &HeadBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// HeadBucket API operation for Amazon Simple Storage Service. -// -// This operation is useful to determine if a bucket exists and you have permission -// to access it. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadBucket for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - return out, req.Send() -} - -// HeadBucketWithContext is the same as HeadBucket with the addition of -// the ability to pass a context and additional request options. -// -// See HeadBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadObject = "HeadObject" - -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See HeadObject for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the HeadObjectRequest method. 
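-// Setup sketch for the example below, including the WithContext form
-// described throughout this file; ctx and all names are illustrative
-// assumptions (aws, context, session, s3, and time packages assumed
-// imported):
-//
-// client := s3.New(session.Must(session.NewSession()))
-// params := &s3.HeadObjectInput{
-//     Bucket: aws.String("example-bucket"), // assumed bucket name
-//     Key:    aws.String("example-key"),    // assumed object key
-// }
-//
-// Any context.Context satisfies aws.Context, e.g.:
-//
-// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-// defer cancel()
-// out, err := client.HeadObjectWithContext(ctx, params)
-//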
-// req, resp := client.HeadObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { - op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &HeadObjectInput{} - } - - output = &HeadObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// HeadObject API operation for Amazon Simple Storage Service. -// -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses -// for more information on returned errors. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - return out, req.Send() -} - -// HeadObjectWithContext is the same as HeadObject with the addition of -// the ability to pass a context and additional request options. -// -// See HeadObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" - -// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListBucketAnalyticsConfigurations for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketAnalyticsConfigurations method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
-// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketAnalyticsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &ListBucketAnalyticsConfigurationsInput{} - } - - output = &ListBucketAnalyticsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the analytics configurations for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketAnalyticsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketAnalyticsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" - -// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListBucketInventoryConfigurations for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketInventoryConfigurations method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
-// req, resp := client.ListBucketInventoryConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketInventoryConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &ListBucketInventoryConfigurationsInput{} - } - - output = &ListBucketInventoryConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. -// -// Returns a list of inventory configurations for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketInventoryConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketInventoryConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" - -// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListBucketMetricsConfigurations for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketMetricsConfigurations method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
-// req, resp := client.ListBucketMetricsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketMetricsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &ListBucketMetricsConfigurationsInput{} - } - - output = &ListBucketMetricsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the metrics configurations for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketMetricsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketMetricsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBuckets = "ListBuckets" - -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListBuckets for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListBucketsRequest method. 
-// req, resp := client.ListBucketsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { - op := &request.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListBucketsInput{} - } - - output = &ListBucketsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBuckets API operation for Amazon Simple Storage Service. -// -// Returns a list of all buckets owned by the authenticated sender of the request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBuckets for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - return out, req.Send() -} - -// ListBucketsWithContext is the same as ListBuckets with the addition of -// the ability to pass a context and additional request options. -// -// See ListBuckets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListMultipartUploads = "ListMultipartUploads" - -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListMultipartUploads for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListMultipartUploadsRequest method. 
-// req, resp := client.ListMultipartUploadsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { - op := &request.Operation{ - Name: opListMultipartUploads, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListMultipartUploadsInput{} - } - - output = &ListMultipartUploadsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListMultipartUploads API operation for Amazon Simple Storage Service. -// -// This operation lists in-progress multipart uploads. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListMultipartUploads for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - return out, req.Send() -} - -// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of -// the ability to pass a context and additional request options. -// -// See ListMultipartUploads for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListMultipartUploads method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListMultipartUploads operation. -// pageNum := 0 -// err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { - return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except -// it takes a Context and allows setting request options on the pages. 
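[Editor's note: the paginator wiring above is easy to misread, so here is a minimal, self-contained sketch of how a caller would drive it. This is an editorial illustration, not part of the vendored file; the bucket name is a placeholder and a default credential chain is assumed.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// List every in-progress multipart upload in the bucket. The paginator
	// threads NextKeyMarker/NextUploadIdMarker from each response back in
	// as KeyMarker/UploadIdMarker, exactly per the Operation tokens above.
	err := svc.ListMultipartUploadsPages(
		&s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")}, // placeholder
		func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
			for _, u := range page.Uploads {
				fmt.Printf("in progress: %s (upload ID %s)\n",
					aws.StringValue(u.Key), aws.StringValue(u.UploadId))
			}
			return true // keep paging; return false to stop early
		})
	if err != nil {
		log.Fatal(err)
	}
}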
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListMultipartUploadsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListMultipartUploadsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListObjectVersions = "ListObjectVersions" - -// ListObjectVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListObjectVersions for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectVersionsRequest method. -// req, resp := client.ListObjectVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { - op := &request.Operation{ - Name: opListObjectVersions, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectVersionsInput{} - } - - output = &ListObjectVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectVersions API operation for Amazon Simple Storage Service. -// -// Returns metadata about all of the versions of objects in a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectVersions for usage and error information. 
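[Editor's note: for contrast with the paged variant, a hedged sketch of a single ListObjectVersions call, showing where the version and delete-marker lists land on the output. Editorial illustration only; bucket and prefix are placeholders.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Prefix: aws.String("logs/"),     // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// One page only: if IsTruncated is set, feed NextKeyMarker/NextVersionIdMarker
	// back in as KeyMarker/VersionIdMarker, or use ListObjectVersionsPages instead.
	fmt.Printf("%d versions, %d delete markers, truncated: %t\n",
		len(out.Versions), len(out.DeleteMarkers), aws.BoolValue(out.IsTruncated))
}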
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - return out, req.Send() -} - -// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectVersions operation. -// pageNum := 0 -// err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { - return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListObjects = "ListObjects" - -// ListObjectsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListObjects for usage and error information. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectsRequest method. -// req, resp := client.ListObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { - op := &request.Operation{ - Name: opListObjects, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectsInput{} - } - - output = &ListObjectsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjects API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjects for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - return out, req.Send() -} - -// ListObjectsWithContext is the same as ListObjects with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsPages iterates over the pages of a ListObjects operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjects method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjects operation. 
-// pageNum := 0 -// err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { - return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsPagesWithContext same as ListObjectsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListObjectsV2 = "ListObjectsV2" - -// ListObjectsV2Request generates a "aws/request.Request" representing the -// client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListObjectsV2 for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectsV2Request method. -// req, resp := client.ListObjectsV2Request(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { - op := &request.Operation{ - Name: opListObjectsV2, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?list-type=2", - Paginator: &request.Paginator{ - InputTokens: []string{"ContinuationToken"}, - OutputTokens: []string{"NextContinuationToken"}, - LimitToken: "MaxKeys", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListObjectsV2Input{} - } - - output = &ListObjectsV2Output{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectsV2 API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectsV2 for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - return out, req.Send() -} - -// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectsV2 for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectsV2 method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectsV2 operation. -// pageNum := 0 -// err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { - return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsV2Input - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsV2Request(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) - } - return p.Err() -} - -const opListParts = "ListParts" - -// ListPartsRequest generates a "aws/request.Request" representing the -// client's request for the ListParts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See ListParts for usage and error information. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListPartsRequest method. -// req, resp := client.ListPartsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { - op := &request.Operation{ - Name: opListParts, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &request.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListPartsInput{} - } - - output = &ListPartsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListParts API operation for Amazon Simple Storage Service. -// -// Lists the parts that have been uploaded for a specific multipart upload. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListParts for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - return out, req.Send() -} - -// ListPartsWithContext is the same as ListParts with the addition of -// the ability to pass a context and additional request options. -// -// See ListParts for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListPartsPages iterates over the pages of a ListParts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListParts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListParts operation. 
-// pageNum := 0 -// err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { - return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPartsPagesWithContext same as ListPartsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPartsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPartsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" - -// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketAccelerateConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. -// req, resp := client.PutBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAccelerateConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &PutBucketAccelerateConfigurationInput{} - } - - output = &PutBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// Sets the accelerate configuration of an existing bucket. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAcl = "PutBucketAcl" - -// PutBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketAcl for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketAclRequest method. -// req, resp := client.PutBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { - op := &request.Operation{ - Name: opPutBucketAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &PutBucketAclInput{} - } - - output = &PutBucketAclOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAcl API operation for Amazon Simple Storage Service. -// -// Sets the permissions on a bucket using access control lists (ACL). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
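[Editor's note: the comments above repeatedly advise a runtime type assertion to awserr.Error without showing one; here is a short editorial sketch of that pattern against HeadBucket, using the ErrCodeNoSuchBucket constant documented earlier. The bucket name is a placeholder, and in practice HeadBucket may also surface a bare "NotFound" code from the HTTP status.]

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("my-bucket")}) // placeholder
	if err != nil {
		// The runtime type assertion the doc comments recommend.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchBucket:
				fmt.Println("bucket does not exist")
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		} else {
			fmt.Println(err)
		}
		return
	}
	fmt.Println("bucket exists and is accessible")
}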
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - return out, req.Send() -} - -// PutBucketAclWithContext is the same as PutBucketAcl with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" - -// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketAnalyticsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAnalyticsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. -// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAnalyticsConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &PutBucketAnalyticsConfigurationInput{} - } - - output = &PutBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
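[Editor's note: every *WithContext wrapper above takes an aws.Context, and a plain context.Context satisfies that interface, so cancellation can ride on context.WithTimeout. An editorial sketch, not part of the vendored file; the bucket name is a placeholder.]

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Abort the request if it has not completed within 10 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := svc.PutBucketAclWithContext(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("my-bucket"),               // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate), // canned ACL "private"
	})
	if err != nil {
		fmt.Println("PutBucketAcl failed or timed out:", err)
		return
	}
	fmt.Println("bucket ACL set to private")
}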
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketCors = "PutBucketCors" - -// PutBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketCors for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketCors method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketCorsRequest method. -// req, resp := client.PutBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { - op := &request.Operation{ - Name: opPutBucketCors, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &PutBucketCorsInput{} - } - - output = &PutBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketCors API operation for Amazon Simple Storage Service. -// -// Sets the cors configuration for a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketCors for usage and error information. 
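[Editor's note: a hedged, self-contained sketch of the PutBucketCors call documented above, replacing the bucket's CORS configuration with one permissive GET rule. Editorial illustration; the bucket name is a placeholder.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// PutBucketCors replaces the whole configuration, so include every rule
	// the bucket should keep.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: []*string{aws.String("GET")},
				AllowedOrigins: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CORS configuration applied")
}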
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
-func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
- req, out := c.PutBucketCorsRequest(input)
- return out, req.Send()
-}
-
-// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutBucketCors for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
- req, out := c.PutBucketCorsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
-
-// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketInventoryConfiguration for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketInventoryConfiguration method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
-// req, resp := client.PutBucketInventoryConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
-func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
- op := &request.Operation{
- Name: opPutBucketInventoryConfiguration,
- HTTPMethod: "PUT",
- HTTPPath: "/{Bucket}?inventory",
- }
-
- if input == nil {
- input = &PutBucketInventoryConfigurationInput{}
- }
-
- output = &PutBucketInventoryConfigurationOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
-//
-// Adds an inventory configuration (identified by the inventory ID) to the
-// bucket.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketInventoryConfiguration for usage and error information.
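[Editor's note: the inventory input shape is the least obvious part of this API, so here is an editorial sketch of storing a daily CSV inventory. Bucket names, the inventory ID, and the destination ARN are all placeholders; field names follow this vendored aws-sdk-go v1.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Write a daily CSV inventory of all object versions into a destination bucket.
	// Note the ID appears twice: on the input and inside the configuration.
	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("my-bucket"),       // placeholder source bucket
		Id:     aws.String("daily-inventory"), // placeholder inventory ID
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("daily-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsAll),
			Schedule: &s3.InventorySchedule{
				Frequency: aws.String(s3.InventoryFrequencyDaily),
			},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::my-inventory-bucket"), // placeholder ARN
					Format: aws.String(s3.InventoryFormatCsv),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inventory configuration stored")
}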
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycle = "PutBucketLifecycle" - -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketLifecycle for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycle method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketLifecycleRequest method. -// req, resp := client.PutBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opPutBucketLifecycle, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleInput{} - } - - output = &PutBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketLifecycle API operation for Amazon Simple Storage Service. -// -// Deprecated, see the PutBucketLifecycleConfiguration operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycle for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - return out, req.Send() -} - -// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" - -// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketLifecycleConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycleConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. -// req, resp := client.PutBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketLifecycleConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleConfigurationInput{} - } - - output = &PutBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. -// -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycleConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
-func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
-	req, out := c.PutBucketLifecycleConfigurationRequest(input)
-	return out, req.Send()
-}
-
-// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutBucketLifecycleConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
-	req, out := c.PutBucketLifecycleConfigurationRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opPutBucketLogging = "PutBucketLogging"
-
-// PutBucketLoggingRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketLogging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketLogging for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLogging method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketLoggingRequest method.
-//    req, resp := client.PutBucketLoggingRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
-func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketLogging,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?logging",
-	}
-
-	if input == nil {
-		input = &PutBucketLoggingInput{}
-	}
-
-	output = &PutBucketLoggingOutput{}
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	return
-}
-
-// PutBucketLogging API operation for Amazon Simple Storage Service.
-//
-// Sets the logging parameters for a bucket and specifies permissions for who
-// can view and modify the logging parameters. To set the logging status of
-// a bucket, you must be the bucket owner.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketLogging for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - return out, req.Send() -} - -// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" - -// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketMetricsConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketMetricsConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketMetricsConfigurationRequest method. -// req, resp := client.PutBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration -func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketMetricsConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &PutBucketMetricsConfigurationInput{} - } - - output = &PutBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketMetricsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
-func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
-	req, out := c.PutBucketMetricsConfigurationRequest(input)
-	return out, req.Send()
-}
-
-// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutBucketMetricsConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
-	req, out := c.PutBucketMetricsConfigurationRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opPutBucketNotification = "PutBucketNotification"
-
-// PutBucketNotificationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketNotification operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketNotification for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotification method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketNotificationRequest method.
-//    req, resp := client.PutBucketNotificationRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
-func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
-	if c.Client.Config.Logger != nil {
-		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
-	}
-	op := &request.Operation{
-		Name:       opPutBucketNotification,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?notification",
-	}
-
-	if input == nil {
-		input = &PutBucketNotificationInput{}
-	}
-
-	output = &PutBucketNotificationOutput{}
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	return
-}
-
-// PutBucketNotification API operation for Amazon Simple Storage Service.
-//
-// Deprecated, see the PutBucketNotificationConfiguration operation.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" - -// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketNotificationConfiguration for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotificationConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketNotificationConfigurationRequest method. -// req, resp := client.PutBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketNotificationConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationConfigurationInput{} - } - - output = &PutBucketNotificationConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Enables notifications of specified events for a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketPolicy = "PutBucketPolicy" - -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketPolicy for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketPolicy method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketPolicyRequest method. -// req, resp := client.PutBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { - op := &request.Operation{ - Name: opPutBucketPolicy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &PutBucketPolicyInput{} - } - - output = &PutBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketPolicy API operation for Amazon Simple Storage Service. -// -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
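// Editor's sketch (not part of the vendored file): the runtime type assertion
// on awserr.Error recommended above, shown against PutBucketPolicy; svc and
// params are assumed to exist.
//
//    if _, err := svc.PutBucketPolicy(params); err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            log.Printf("S3 error: code=%s message=%s", aerr.Code(), aerr.Message())
//        } else {
//            log.Printf("non-AWS error: %v", err)
//        }
//    }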
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - return out, req.Send() -} - -// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketReplication = "PutBucketReplication" - -// PutBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketReplication for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketReplication method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketReplicationRequest method. -// req, resp := client.PutBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { - op := &request.Operation{ - Name: opPutBucketReplication, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &PutBucketReplicationInput{} - } - - output = &PutBucketReplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketReplication API operation for Amazon Simple Storage Service. -// -// Creates a new replication configuration (or replaces an existing one, if -// present). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketReplication for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - return out, req.Send() -} - -// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketRequestPayment = "PutBucketRequestPayment" - -// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketRequestPayment for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketRequestPayment method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketRequestPaymentRequest method. -// req, resp := client.PutBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opPutBucketRequestPayment, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &PutBucketRequestPaymentInput{} - } - - output = &PutBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketRequestPayment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketTagging = "PutBucketTagging" - -// PutBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketTaggingRequest method. -// req, resp := client.PutBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { - op := &request.Operation{ - Name: opPutBucketTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &PutBucketTaggingInput{} - } - - output = &PutBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketTagging API operation for Amazon Simple Storage Service. -// -// Sets the tags for a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - return out, req.Send() -} - -// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketVersioning = "PutBucketVersioning" - -// PutBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketVersioning for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketVersioning method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketVersioningRequest method. -// req, resp := client.PutBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { - op := &request.Operation{ - Name: opPutBucketVersioning, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &PutBucketVersioningInput{} - } - - output = &PutBucketVersioningOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketVersioning API operation for Amazon Simple Storage Service. -// -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketVersioning for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - return out, req.Send() -} - -// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketWebsite = "PutBucketWebsite" - -// PutBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutBucketWebsite for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketWebsite method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketWebsiteRequest method. -// req, resp := client.PutBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { - op := &request.Operation{ - Name: opPutBucketWebsite, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &PutBucketWebsiteInput{} - } - - output = &PutBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketWebsite API operation for Amazon Simple Storage Service. -// -// Set the website configuration for a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketWebsite for usage and error information. 
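// Editor's sketch (not part of the vendored file): the *WithContext variants
// described throughout this file accept a standard context.Context (which
// satisfies aws.Context); svc and params are assumed to exist.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    if _, err := svc.PutBucketWebsiteWithContext(ctx, params); err != nil {
//        log.Printf("PutBucketWebsite failed or was cancelled: %v", err)
//    }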
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - return out, req.Send() -} - -// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObject = "PutObject" - -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutObject for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { - op := &request.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &PutObjectInput{} - } - - output = &PutObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObject API operation for Amazon Simple Storage Service. -// -// Adds an object to a bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - return out, req.Send() -} - -// PutObjectWithContext is the same as PutObject with the addition of -// the ability to pass a context and additional request options. -// -// See PutObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
-	req, out := c.PutObjectRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opPutObjectAcl = "PutObjectAcl"
-
-// PutObjectAclRequest generates a "aws/request.Request" representing the
-// client's request for the PutObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutObjectAcl for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectAcl method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutObjectAclRequest method.
-//    req, resp := client.PutObjectAclRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
-func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
-	op := &request.Operation{
-		Name:       opPutObjectAcl,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}?acl",
-	}
-
-	if input == nil {
-		input = &PutObjectAclInput{}
-	}
-
-	output = &PutObjectAclOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// PutObjectAcl API operation for Amazon Simple Storage Service.
-//
-// Uses the acl subresource to set the access control list (ACL) permissions
-// for an object that already exists in a bucket.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutObjectAcl for usage and error information.
-//
-// Returned Error Codes:
-//   * ErrCodeNoSuchKey "NoSuchKey"
-//   The specified key does not exist.
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
-func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
-	req, out := c.PutObjectAclRequest(input)
-	return out, req.Send()
-}
-
-// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutObjectAcl for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
-	req, out := c.PutObjectAclRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
- return out, req.Send() -} - -const opPutObjectTagging = "PutObjectTagging" - -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See PutObjectTagging for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { - op := &request.Operation{ - Name: opPutObjectTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &PutObjectTaggingInput{} - } - - output = &PutObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectTagging API operation for Amazon Simple Storage Service. -// -// Sets the supplied tag-set to an object that already exists in a bucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - return out, req.Send() -} - -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRestoreObject = "RestoreObject" - -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See RestoreObject for usage and error information. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { - op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", - } - - if input == nil { - input = &RestoreObjectInput{} - } - - output = &RestoreObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// RestoreObject API operation for Amazon Simple Storage Service. -// -// Restores an archived copy of an object back into Amazon S3 -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - return out, req.Send() -} - -// RestoreObjectWithContext is the same as RestoreObject with the addition of -// the ability to pass a context and additional request options. -// -// See RestoreObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUploadPart = "UploadPart" - -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See UploadPart for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPart method directly -// instead. 
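// Editor's sketch (not part of the vendored file): uploading a part and
// aborting the multipart upload on failure so that storage charges stop, as
// the note below explains. All identifiers and values are hypothetical.
//
//    part, err := svc.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("example-bucket"),
//        Key:        aws.String("example-key"),
//        UploadId:   aws.String("example-upload-id"),
//        PartNumber: aws.Int64(1),
//        Body:       bytes.NewReader(data),
//    })
//    if err != nil {
//        _, _ = svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//            Bucket:   aws.String("example-bucket"),
//            Key:      aws.String("example-key"),
//            UploadId: aws.String("example-upload-id"),
//        })
//    }
//    _ = part // part.ETag is needed later to complete the upload.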
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the UploadPartRequest method.
-//    req, resp := client.UploadPartRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
-func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
-	op := &request.Operation{
-		Name:       opUploadPart,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &UploadPartInput{}
-	}
-
-	output = &UploadPartOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// UploadPart API operation for Amazon Simple Storage Service.
-//
-// Uploads a part in a multipart upload.
-//
-// Note: After you initiate a multipart upload and upload one or more parts,
-// you must either complete or abort the multipart upload in order to stop
-// getting charged for storage of the uploaded parts. Only after you either
-// complete or abort the multipart upload does Amazon S3 free up the parts
-// storage and stop charging you for it.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation UploadPart for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
-func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
-	req, out := c.UploadPartRequest(input)
-	return out, req.Send()
-}
-
-// UploadPartWithContext is the same as UploadPart with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UploadPart for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
-	req, out := c.UploadPartRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opUploadPartCopy = "UploadPartCopy"
-
-// UploadPartCopyRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPartCopy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See UploadPartCopy for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPartCopy method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the UploadPartCopyRequest method.
-// req, resp := client.UploadPartCopyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { - op := &request.Operation{ - Name: opUploadPartCopy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &UploadPartCopyInput{} - } - - output = &UploadPartCopyOutput{} - req = c.newRequest(op, input, output) - return -} - -// UploadPartCopy API operation for Amazon Simple Storage Service. -// -// Uploads a part by copying data from an existing object as data source. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPartCopy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - return out, req.Send() -} - -// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of -// the ability to pass a context and additional request options. -// -// See UploadPartCopy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload -type AbortIncompleteMultipartUpload struct { - _ struct{} `type:"structure"` - - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. - DaysAfterInitiation *int64 `type:"integer"` -} - -// String returns the string representation -func (s AbortIncompleteMultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortIncompleteMultipartUpload) GoString() string { - return s.String() -} - -// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. 
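A minimal sketch of the copy-based part upload documented above; UploadPartCopyInput's field names are assumed from the rest of this file rather than shown in this hunk, and the bucket, key, and source values are placeholders (imports as in the earlier sketch):

    func uploadPartCopySketch(svc *s3.S3, uploadID string) (*s3.UploadPartCopyOutput, error) {
        return svc.UploadPartCopy(&s3.UploadPartCopyInput{
            Bucket:     aws.String("dest-bucket"),        // placeholder
            Key:        aws.String("big/object.bin"),     // placeholder
            UploadId:   aws.String(uploadID),
            PartNumber: aws.Int64(1),
            CopySource: aws.String("src-bucket/src-key"), // URL-encoded "bucket/key"
        })
    }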
-func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { - s.DaysAfterInitiation = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest -type AbortMultipartUploadInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s AbortMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AbortMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput -type AbortMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s AbortMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. 
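The fluent setters above chain naturally, and Validate aggregates every missing required field into a single ErrInvalidParams error before any network call is made; a short sketch (bucket and key are placeholders, and the AbortMultipartUpload operation itself is defined earlier in this file):

    func abortUploadSketch(svc *s3.S3, uploadID string) error {
        input := (&s3.AbortMultipartUploadInput{}).
            SetBucket("example-bucket").
            SetKey("big/object.bin").
            SetUploadId(uploadID)
        if err := input.Validate(); err != nil {
            return err // reports Bucket, Key, and UploadId problems together
        }
        _, err := svc.AbortMultipartUpload(input)
        return err
    }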
-func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration -type AccelerateConfiguration struct { - _ struct{} `type:"structure"` - - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation -func (s AccelerateConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccelerateConfiguration) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { - s.Status = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy -type AccessControlPolicy struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s AccessControlPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccessControlPolicy) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AccessControlPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} - if s.Grants != nil { - for i, v := range s.Grants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrants sets the Grants field's value. -func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { - s.Owner = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator -type AnalyticsAndOperator struct { - _ struct{} `type:"structure"` - - // The prefix to use when evaluating an AND predicate. - Prefix *string `type:"string"` - - // The list of tags to use when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s AnalyticsAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. 
-func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { - s.Tags = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration -type AnalyticsConfiguration struct { - _ struct{} `type:"structure"` - - // The filter used to describe a set of objects for analyses. A filter must - // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). - // If no filter is provided, all objects will be considered in any analysis. - Filter *AnalyticsFilter `type:"structure"` - - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. - // - // StorageClassAnalysis is a required field - StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` -} - -// String returns the string representation -func (s AnalyticsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.StorageClassAnalysis == nil { - invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.StorageClassAnalysis != nil { - if err := s.StorageClassAnalysis.Validate(); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { - s.Id = &v - return s -} - -// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. -func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { - s.StorageClassAnalysis = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination -type AnalyticsExportDestination struct { - _ struct{} `type:"structure"` - - // A destination signifying output to an S3 bucket. - // - // S3BucketDestination is a required field - S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` -} - -// String returns the string representation -func (s AnalyticsExportDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsExportDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
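A sketch of how these pieces compose, using the AnalyticsFilter type defined just below; the Tag fields (Key, Value) are assumed from the SDK rather than shown in this hunk. An And conjunction satisfies the "at least two predicates" rule here with one prefix plus one tag:

    func analyticsConfigSketch() error {
        filter := &s3.AnalyticsFilter{
            And: (&s3.AnalyticsAndOperator{}).
                SetPrefix("logs/").
                SetTags([]*s3.Tag{{Key: aws.String("team"), Value: aws.String("data")}}),
        }
        cfg := (&s3.AnalyticsConfiguration{}).SetFilter(filter)
        // Validate flags the still-missing required Id and StorageClassAnalysis
        // fields alongside any nested filter errors, all in one ErrInvalidParams.
        return cfg.Validate()
    }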
-func (s *AnalyticsExportDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { - s.S3BucketDestination = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter -type AnalyticsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating an - // analytics filter. The operator must have at least two predicates. - And *AnalyticsAndOperator `type:"structure"` - - // The prefix to use when evaluating an analytics filter. - Prefix *string `type:"string"` - - // The tag to use when evaluating an analytics filter. - Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s AnalyticsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { - s.Tag = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination -type AnalyticsS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The Amazon resource name (ARN) of the bucket to which data is exported. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. - BucketAccountId *string `type:"string"` - - // The file format used when exporting data to Amazon S3. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - - // The prefix to use when exporting data. The exported data begins with this - // prefix. 
- Prefix *string `type:"string"` -} - -// String returns the string representation -func (s AnalyticsS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { - s.Bucket = &v - return s -} - -// SetBucketAccountId sets the BucketAccountId field's value. -func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { - s.BucketAccountId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket -type Bucket struct { - _ struct{} `type:"structure"` - - // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // The name of the bucket. - Name *string `type:"string"` -} - -// String returns the string representation -func (s Bucket) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Bucket) GoString() string { - return s.String() -} - -// SetCreationDate sets the CreationDate field's value. -func (s *Bucket) SetCreationDate(v time.Time) *Bucket { - s.CreationDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *Bucket) SetName(v string) *Bucket { - s.Name = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration -type BucketLifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // Rules is a required field - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s BucketLifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BucketLifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. 
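A sketch of a lifecycle configuration tying this type to the AbortIncompleteMultipartUpload action defined earlier in this section; LifecycleRule's field names (Status, Prefix, AbortIncompleteMultipartUpload) are assumed from elsewhere in this file, not shown in this hunk:

    func lifecycleConfigSketch() *s3.BucketLifecycleConfiguration {
        rule := &s3.LifecycleRule{
            Status: aws.String("Enabled"),
            Prefix: aws.String("uploads/"), // placeholder key prefix
            AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
                DaysAfterInitiation: aws.Int64(7), // drop stale parts after a week
            },
        }
        return (&s3.BucketLifecycleConfiguration{}).SetRules([]*s3.LifecycleRule{rule})
    }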
-func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { - s.Rules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus -type BucketLoggingStatus struct { - _ struct{} `type:"structure"` - - LoggingEnabled *LoggingEnabled `type:"structure"` -} - -// String returns the string representation -func (s BucketLoggingStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BucketLoggingStatus) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLoggingStatus) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} - if s.LoggingEnabled != nil { - if err := s.LoggingEnabled.Validate(); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { - s.LoggingEnabled = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration -type CORSConfiguration struct { - _ struct{} `type:"structure"` - - // CORSRules is a required field - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s CORSConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} - if s.CORSRules == nil { - invalidParams.Add(request.NewErrParamRequired("CORSRules")) - } - if s.CORSRules != nil { - for i, v := range s.CORSRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCORSRules sets the CORSRules field's value. -func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { - s.CORSRules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule -type CORSRule struct { - _ struct{} `type:"structure"` - - // Specifies which headers are allowed in a pre-flight OPTIONS request. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. - // - // AllowedMethods is a required field - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` - - // One or more origins you want customers to be able to access the bucket from. - // - // AllowedOrigins is a required field - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). 
- ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` - - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. - MaxAgeSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s CORSRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSRule"} - if s.AllowedMethods == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) - } - if s.AllowedOrigins == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAllowedHeaders sets the AllowedHeaders field's value. -func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { - s.AllowedHeaders = v - return s -} - -// SetAllowedMethods sets the AllowedMethods field's value. -func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { - s.AllowedMethods = v - return s -} - -// SetAllowedOrigins sets the AllowedOrigins field's value. -func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { - s.AllowedOrigins = v - return s -} - -// SetExposeHeaders sets the ExposeHeaders field's value. -func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { - s.ExposeHeaders = v - return s -} - -// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. -func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { - s.MaxAgeSeconds = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration -type CloudFunctionConfiguration struct { - _ struct{} `type:"structure"` - - CloudFunction *string `type:"string"` - - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - InvocationRole *string `type:"string"` -} - -// String returns the string representation -func (s CloudFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CloudFunctionConfiguration) GoString() string { - return s.String() -} - -// SetCloudFunction sets the CloudFunction field's value. -func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { - s.CloudFunction = &v - return s -} - -// SetEvent sets the Event field's value. -func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { - s.Id = &v - return s -} - -// SetInvocationRole sets the InvocationRole field's value. 
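A sketch wiring the CORS types above together; the origin and header values are placeholders, and applying the configuration (PutBucketCors) lives elsewhere in this file:

    func corsConfigSketch() (*s3.CORSConfiguration, error) {
        rule := &s3.CORSRule{
            AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
            AllowedOrigins: []*string{aws.String("https://example.com")},
            AllowedHeaders: []*string{aws.String("*")},
            ExposeHeaders:  []*string{aws.String("ETag")},
        }
        rule.SetMaxAgeSeconds(3000) // preflight responses may be cached for 50 minutes
        cfg := &s3.CORSConfiguration{CORSRules: []*s3.CORSRule{rule}}
        // Validate enforces the required AllowedMethods and AllowedOrigins per rule.
        return cfg, cfg.Validate()
    }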
-func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { - s.InvocationRole = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix -type CommonPrefix struct { - _ struct{} `type:"structure"` - - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s CommonPrefix) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CommonPrefix) GoString() string { - return s.String() -} - -// SetPrefix sets the Prefix field's value. -func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest -type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s CompleteMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CompleteMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { - s.Key = &v - return s -} - -// SetMultipartUpload sets the MultipartUpload field's value. -func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { - s.MultipartUpload = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. 
-func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput -type CompleteMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - Bucket *string `type:"string"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - Key *string `min:"1" type:"string"` - - Location *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s CompleteMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { - s.Bucket = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { - s.Expiration = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { - s.Key = &v - return s -} - -// SetLocation sets the Location field's value. -func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { - s.Location = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
-	s.VersionId = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
-type CompletedMultipartUpload struct {
-	_ struct{} `type:"structure"`
-
-	Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
-}
-
-// String returns the string representation
-func (s CompletedMultipartUpload) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompletedMultipartUpload) GoString() string {
-	return s.String()
-}
-
-// SetParts sets the Parts field's value.
-func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
-	s.Parts = v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
-type CompletedPart struct {
-	_ struct{} `type:"structure"`
-
-	// Entity tag returned when the part was uploaded.
-	ETag *string `type:"string"`
-
-	// Part number that identifies the part. This is a positive integer between
-	// 1 and 10,000.
-	PartNumber *int64 `type:"integer"`
-}
-
-// String returns the string representation
-func (s CompletedPart) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompletedPart) GoString() string {
-	return s.String()
-}
-
-// SetETag sets the ETag field's value.
-func (s *CompletedPart) SetETag(v string) *CompletedPart {
-	s.ETag = &v
-	return s
-}
-
-// SetPartNumber sets the PartNumber field's value.
-func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
-	s.PartNumber = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
-type Condition struct {
-	_ struct{} `type:"structure"`
-
-	// The HTTP error code when the redirect is applied. In the event of an error,
-	// if the error code equals this value, then the specified redirect is applied.
-	// Required when parent element Condition is specified and sibling KeyPrefixEquals
-	// is not specified. If both are specified, then both must be true for the redirect
-	// to be applied.
-	HttpErrorCodeReturnedEquals *string `type:"string"`
-
-	// The object key name prefix when the redirect is applied. For example, to
-	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
-	// To redirect requests for all pages with the prefix docs/, the key prefix will
-	// be docs/, which identifies all objects in the docs/ folder. Required when
-	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
-	// is not specified. If both conditions are specified, both must be true for
-	// the redirect to be applied.
-	KeyPrefixEquals *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Condition) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Condition) GoString() string {
-	return s.String()
-}
-
-// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
-func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
-	s.HttpErrorCodeReturnedEquals = &v
-	return s
-}
-
-// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
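A sketch of assembling the CompletedPart list above to finish an upload; the ETags come from the corresponding UploadPart responses, the part numbers must match those used at upload time, and CompleteMultipartUpload itself is defined earlier in this file (bucket and key are placeholders):

    func completeUploadSketch(svc *s3.S3, uploadID string, etags []string) error {
        parts := make([]*s3.CompletedPart, len(etags))
        for i, etag := range etags {
            parts[i] = (&s3.CompletedPart{}).
                SetETag(etag).
                SetPartNumber(int64(i + 1)) // 1-based, matching the upload order here
        }
        _, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
            Bucket:          aws.String("example-bucket"),
            Key:             aws.String("big/object.bin"),
            UploadId:        aws.String(uploadID),
            MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
        })
        return err
    }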
-func (s *Condition) SetKeyPrefixEquals(v string) *Condition { - s.KeyPrefixEquals = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest -type CopyObjectInput struct { - _ struct{} `type:"structure"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - // - // CopySource is a required field - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // The date and time at which the object is no longer cacheable. 
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
-
-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
-
-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
-
-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// Specifies whether the metadata is copied from the source object or replaced
-	// with metadata provided in the request.
-	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-	// The tag-set for the destination object; this value must be used in conjunction
-	// with the TaggingDirective. The tag-set must be encoded as URL query parameters.
-	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
-
-	// Specifies whether the object tag-set is copied from the source object or
-	// replaced with the tag-set provided in the request.
-	TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
-
-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s CopyObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CopyObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CopyObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.CopySource == nil {
-		invalidParams.Add(request.NewErrParamRequired("CopySource"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetACL sets the ACL field's value.
-func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
-	s.ACL = &v
-	return s
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
-	s.Bucket = &v
-	return s
-}
-
-// SetCacheControl sets the CacheControl field's value.
-func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
-	s.CacheControl = &v
-	return s
-}
-
-// SetContentDisposition sets the ContentDisposition field's value.
-func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
-	s.ContentDisposition = &v
-	return s
-}
-
-// SetContentEncoding sets the ContentEncoding field's value.
-func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
-	s.ContentEncoding = &v
-	return s
-}
-
-// SetContentLanguage sets the ContentLanguage field's value.
-func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
-	s.ContentLanguage = &v
-	return s
-}
-
-// SetContentType sets the ContentType field's value.
-func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
-	s.ContentType = &v
-	return s
-}
-
-// SetCopySource sets the CopySource field's value.
-func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
-	s.CopySource = &v
-	return s
-}
-
-// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
-func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
-	s.CopySourceIfMatch = &v
-	return s
-}
-
-// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
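A sketch of building a valid CopyObjectInput; note that CopySource carries both bucket and key and must be URL-encoded, so "net/url" is assumed alongside the imports from the earlier sketches (all names are placeholders):

    func copyObjectInputSketch() (*s3.CopyObjectInput, error) {
        input := &s3.CopyObjectInput{
            Bucket: aws.String("dest-bucket"),
            Key:    aws.String("reports/summary.csv"),
            // "source-bucket/source-key", URL-encoded as the header requires.
            CopySource: aws.String(url.PathEscape("source-bucket") + "/" +
                url.PathEscape("reports/summary.csv")),
        }
        // Bucket, CopySource, and Key are the three required fields checked above.
        return input, input.Validate()
    }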
-func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { - s.CopySourceSSECustomerKey = &v - return s -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { - s.Metadata = v - return s -} - -// SetMetadataDirective sets the MetadataDirective field's value. -func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { - s.MetadataDirective = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. 
-func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { - s.Tagging = &v - return s -} - -// SetTaggingDirective sets the TaggingDirective field's value. -func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { - s.TaggingDirective = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput -type CopyObjectOutput struct { - _ struct{} `type:"structure" payload:"CopyObjectResult"` - - CopyObjectResult *CopyObjectResult `type:"structure"` - - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If the object expiration is configured, the response includes this header. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version ID of the newly created copy. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s CopyObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectOutput) GoString() string { - return s.String() -} - -// SetCopyObjectResult sets the CopyObjectResult field's value. -func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { - s.CopyObjectResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { - s.CopySourceVersionId = &v - return s -} - -// SetExpiration sets the Expiration field's value. 
-func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { - s.Expiration = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult -type CopyObjectResult struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` -} - -// String returns the string representation -func (s CopyObjectResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectResult) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { - s.LastModified = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult -type CopyPartResult struct { - _ struct{} `type:"structure"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // Date and time at which the object was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` -} - -// String returns the string representation -func (s CopyPartResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyPartResult) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *CopyPartResult) SetETag(v string) *CopyPartResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { - s.LastModified = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration -type CreateBucketConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. 
- LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` -} - -// String returns the string representation -func (s CreateBucketConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketConfiguration) GoString() string { - return s.String() -} - -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { - s.LocationConstraint = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest -type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -} - -// String returns the string representation -func (s CreateBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { - s.Bucket = &v - return s -} - -// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. -func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { - s.CreateBucketConfiguration = v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. 
-func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { - s.GrantWriteACP = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput -type CreateBucketOutput struct { - _ struct{} `type:"structure"` - - Location *string `location:"header" locationName:"Location" type:"string"` -} - -// String returns the string representation -func (s CreateBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketOutput) GoString() string { - return s.String() -} - -// SetLocation sets the Location field's value. -func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { - s.Location = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest -type CreateMultipartUploadInput struct { - _ struct{} `type:"structure"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. 
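CreateBucketInput pairs the required Bucket with an optional CreateBucketConfiguration; outside us-east-1 the LocationConstraint has to name the target region. A sketch, assuming a hypothetical bucket name and region:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// "example-bucket" and "eu-west-1" are placeholders.
	out, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// CreateBucketOutput carries the Location header, e.g. "/example-bucket".
	fmt.Println(aws.StringValue(out.Location))
}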
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s CreateMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { - s.Bucket = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { - s.ContentLanguage = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { - s.ContentType = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { - s.Metadata = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. 
-func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { - s.StorageClass = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { - s.WebsiteRedirectLocation = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput -type CreateMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` - - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `locationName:"Bucket" type:"string"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // ID for the initiated multipart upload. 
- UploadId *string `type:"string"` -} - -// String returns the string representation -func (s CreateMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { - s.Key = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete -type Delete struct { - _ struct{} `type:"structure"` - - // Objects is a required field - Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` - - // Element to enable quiet mode for the request. When you add this element, - // you must set its value to true. - Quiet *bool `type:"boolean"` -} - -// String returns the string representation -func (s Delete) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Delete) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} - if s.Objects == nil { - invalidParams.Add(request.NewErrParamRequired("Objects")) - } - if s.Objects != nil { - for i, v := range s.Objects { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetObjects sets the Objects field's value. 
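The multipart-upload types above form the handshake that starts a multipart upload: the call returns an UploadId that every subsequent UploadPart, CompleteMultipartUpload, or AbortMultipartUpload request must echo. A sketch with placeholder names; the SSE setting is optional:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// "example-bucket" and "big-object" are placeholders.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("big-object"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Thread this ID through every part upload and the final complete/abort call.
	fmt.Println("upload id:", aws.StringValue(out.UploadId))
}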
-func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { - s.Objects = v - return s -} - -// SetQuiet sets the Quiet field's value. -func (s *Delete) SetQuiet(v bool) *Delete { - s.Quiet = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest -type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket from which an analytics configuration is deleted. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput -type DeleteBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest -type DeleteBucketCorsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput -type DeleteBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCorsOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest -type DeleteBucketInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest -type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the inventory configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput -type DeleteBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest -type DeleteBucketLifecycleInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput -type DeleteBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest -type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the metrics configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput -type DeleteBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput -type DeleteBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest -type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput -type DeleteBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest -type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput -type DeleteBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest -type DeleteBucketTaggingInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput -type DeleteBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest -type DeleteBucketWebsiteInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput -type DeleteBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry -type DeleteMarkerEntry struct { - _ struct{} `type:"structure"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s DeleteMarkerEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMarkerEntry) GoString() string { - return s.String() -} - -// SetIsLatest sets the IsLatest field's value. -func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. 
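All of the DeleteBucket* inputs above share one shape: a required Bucket, a Validate that only checks its presence, and an empty output struct. One sketch stands in for the whole family; swapping DeleteBucketWebsite for DeleteBucketCors, DeleteBucketPolicy, and so on changes only the method name:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// "example-bucket" is a placeholder; the output carries no fields,
	// so only the error is interesting.
	if _, err := svc.DeleteBucketWebsite(&s3.DeleteBucketWebsiteInput{
		Bucket: aws.String("example-bucket"),
	}); err != nil {
		log.Fatal(err)
	}
}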
-func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { - s.Owner = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest -type DeleteObjectInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { - s.Key = &v - return s -} - -// SetMFA sets the MFA field's value. -func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { - s.RequestCharged = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest -type DeleteObjectTaggingInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The versionId of the object that the tag-set will be removed from. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput -type DeleteObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // The versionId of the object the tag-set was removed from. 
- VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest -type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s DeleteObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Delete == nil { - invalidParams.Add(request.NewErrParamRequired("Delete")) - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { - s.Bucket = &v - return s -} - -// SetDelete sets the Delete field's value. -func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { - s.Delete = v - return s -} - -// SetMFA sets the MFA field's value. -func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { - s.RequestPayer = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput -type DeleteObjectsOutput struct { - _ struct{} `type:"structure"` - - Deleted []*DeletedObject `type:"list" flattened:"true"` - - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s DeleteObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsOutput) GoString() string { - return s.String() -} - -// SetDeleted sets the Deleted field's value. -func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { - s.Deleted = v - return s -} - -// SetErrors sets the Errors field's value. -func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { - s.Errors = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { - s.RequestCharged = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject -type DeletedObject struct { - _ struct{} `type:"structure"` - - DeleteMarker *bool `type:"boolean"` - - DeleteMarkerVersionId *string `type:"string"` - - Key *string `min:"1" type:"string"` - - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s DeletedObject) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletedObject) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { - s.DeleteMarker = &v - return s -} - -// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. -func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { - s.DeleteMarkerVersionId = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeletedObject) SetKey(v string) *DeletedObject { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeletedObject) SetVersionId(v string) *DeletedObject { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination -type Destination struct { - _ struct{} `type:"structure"` - - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` -} - -// String returns the string representation -func (s Destination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Destination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Destination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Destination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *Destination) SetBucket(v string) *Destination { - s.Bucket = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. 
-func (s *Destination) SetStorageClass(v string) *Destination { - s.StorageClass = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error -type Error struct { - _ struct{} `type:"structure"` - - Code *string `type:"string"` - - Key *string `min:"1" type:"string"` - - Message *string `type:"string"` - - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s Error) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Error) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *Error) SetCode(v string) *Error { - s.Code = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Error) SetKey(v string) *Error { - s.Key = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *Error) SetMessage(v string) *Error { - s.Message = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *Error) SetVersionId(v string) *Error { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument -type ErrorDocument struct { - _ struct{} `type:"structure"` - - // The object key name to use when a 4XX class error occurs. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s ErrorDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ErrorDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *ErrorDocument) SetKey(v string) *ErrorDocument { - s.Key = &v - return s -} - -// Container for key value pair that defines the criteria for the filter rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule -type FilterRule struct { - _ struct{} `type:"structure"` - - // Object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Name *string `type:"string" enum:"FilterRuleName"` - - Value *string `type:"string"` -} - -// String returns the string representation -func (s FilterRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterRule) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *FilterRule) SetName(v string) *FilterRule { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. 
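Delete, DeleteObjectsInput and the DeletedObject/Error result types above together implement batch deletion: up to 1,000 keys per request, with per-key successes and failures reported in the response body rather than as a Go error. A sketch, with placeholder bucket and key names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("a.txt")},
				{Key: aws.String("b.txt")},
			},
			Quiet: aws.Bool(false), // report successes as well as errors
		},
	})
	if err != nil {
		log.Fatal(err) // request-level failure only
	}
	for _, d := range out.Deleted {
		fmt.Println("deleted:", aws.StringValue(d.Key))
	}
	for _, e := range out.Errors {
		// Per-key failures arrive here as Error values, not as a Go error.
		fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
	}
}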
-func (s *FilterRule) SetValue(v string) *FilterRule { - s.Value = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest -type GetBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure"` - - // Name of the bucket for which the accelerate configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput -type GetBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation -func (s GetBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { - s.Status = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest -type GetBucketAclInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput -type GetBucketAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. 
- Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s GetBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { - s.Owner = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest -type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket from which an analytics configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput -type GetBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
-func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { - s.AnalyticsConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest -type GetBucketCorsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput -type GetBucketCorsOutput struct { - _ struct{} `type:"structure"` - - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCorsOutput) GoString() string { - return s.String() -} - -// SetCORSRules sets the CORSRules field's value. -func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { - s.CORSRules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest -type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the inventory configuration to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput -type GetBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` - - // Specifies the inventory configuration. - InventoryConfiguration *InventoryConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { - s.InventoryConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest -type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput -type GetBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` - - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -// SetRules sets the Rules field's value. 
-func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { - s.Rules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest -type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput -type GetBucketLifecycleOutput struct { - _ struct{} `type:"structure"` - - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleOutput) GoString() string { - return s.String() -} - -// SetRules sets the Rules field's value. -func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { - s.Rules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest -type GetBucketLocationInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLocationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLocationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLocationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput -type GetBucketLocationOutput struct { - _ struct{} `type:"structure"` - - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` -} - -// String returns the string representation -func (s GetBucketLocationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLocationOutput) GoString() string { - return s.String() -} - -// SetLocationConstraint sets the LocationConstraint field's value. 
-func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { - s.LocationConstraint = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest -type GetBucketLoggingInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput -type GetBucketLoggingOutput struct { - _ struct{} `type:"structure"` - - LoggingEnabled *LoggingEnabled `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingOutput) GoString() string { - return s.String() -} - -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { - s.LoggingEnabled = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest -type GetBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the metrics configuration to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. 
-func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput -type GetBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` - - // Specifies the metrics configuration. - MetricsConfiguration *MetricsConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -// SetMetricsConfiguration sets the MetricsConfiguration field's value. -func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { - s.MetricsConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest -type GetBucketNotificationConfigurationRequest struct { - _ struct{} `type:"structure"` - - // Name of the bucket to get the notification configuration for. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketNotificationConfigurationRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketNotificationConfigurationRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketNotificationConfigurationRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest -type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput -type GetBucketPolicyOutput struct { - _ struct{} `type:"structure" payload:"Policy"` - - // The bucket policy as a JSON document. - Policy *string `type:"string"` -} - -// String returns the string representation -func (s GetBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyOutput) GoString() string { - return s.String() -} - -// SetPolicy sets the Policy field's value. -func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { - s.Policy = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest -type GetBucketReplicationInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput -type GetBucketReplicationOutput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationOutput) GoString() string { - return s.String() -} - -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { - s.ReplicationConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest -type GetBucketRequestPaymentInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput -type GetBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` - - // Specifies who pays for the download and request fees. - Payer *string `type:"string" enum:"Payer"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -// SetPayer sets the Payer field's value. -func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { - s.Payer = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest -type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput -type GetBucketTaggingOutput struct { - _ struct{} `type:"structure"` - - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation -func (s GetBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. 
-func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { - s.TagSet = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest -type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput -type GetBucketVersioningOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s GetBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningOutput) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { - s.Status = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest -type GetBucketWebsiteInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput -type GetBucketWebsiteOutput struct { - _ struct{} `type:"structure"` - - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s GetBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteOutput) GoString() string { - return s.String() -} - -// SetErrorDocument sets the ErrorDocument field's value. -func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { - s.RoutingRules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest -type GetObjectAclInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput -type GetObjectAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s GetObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { - s.Owner = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { - s.RequestCharged = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest -type GetObjectInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. - // Useful for downloading just a part of an object. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - - // Downloads the specified range bytes of an object. 
For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` - - // Sets the Content-Disposition header of the response - ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` - - // Sets the Content-Type header of the response. - ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` - - // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { - s.Bucket = &v - return s -} - -// SetIfMatch sets the IfMatch field's value. -func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { - s.IfMatch = &v - return s -} - -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { - s.IfModifiedSince = &v - return s -} - -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { - s.IfNoneMatch = &v - return s -} - -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectInput) SetKey(v string) *GetObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { - s.RequestPayer = &v - return s -} - -// SetResponseCacheControl sets the ResponseCacheControl field's value. -func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { - s.ResponseCacheControl = &v - return s -} - -// SetResponseContentDisposition sets the ResponseContentDisposition field's value. -func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { - s.ResponseContentDisposition = &v - return s -} - -// SetResponseContentEncoding sets the ResponseContentEncoding field's value. -func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { - s.ResponseContentEncoding = &v - return s -} - -// SetResponseContentLanguage sets the ResponseContentLanguage field's value. -func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { - s.ResponseContentLanguage = &v - return s -} - -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s -} - -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
-func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput -type GetObjectOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Object data. - Body io.ReadCloser `type:"blob"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - // The count of parts this object has. 
- PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The number of tags, if any, on the object. - TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s GetObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetBody sets the Body field's value. -func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { - s.Body = v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. 
-func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentRange sets the ContentRange field's value. -func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { - s.ContentRange = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { - s.MissingMeta = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { - s.StorageClass = &v - return s -} - -// SetTagCount sets the TagCount field's value. 
-func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { - s.TagCount = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest -type GetObjectTaggingInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput -type GetObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` - - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s GetObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. -func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { - s.TagSet = v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest -type GetObjectTorrentInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s GetObjectTorrentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTorrentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { - s.RequestPayer = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput -type GetObjectTorrentOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - Body io.ReadCloser `type:"blob"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s GetObjectTorrentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentOutput) GoString() string { - return s.String() -} - -// SetBody sets the Body field's value. -func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { - s.Body = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { - s.RequestCharged = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters -type GlacierJobParameters struct { - _ struct{} `type:"structure"` - - // Glacier retrieval tier at which the restore will be processed. 
- // - // Tier is a required field - Tier *string `type:"string" required:"true" enum:"Tier"` -} - -// String returns the string representation -func (s GlacierJobParameters) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GlacierJobParameters) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GlacierJobParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} - if s.Tier == nil { - invalidParams.Add(request.NewErrParamRequired("Tier")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTier sets the Tier field's value. -func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { - s.Tier = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant -type Grant struct { - _ struct{} `type:"structure"` - - Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Specifies the permission given to the grantee. - Permission *string `type:"string" enum:"Permission"` -} - -// String returns the string representation -func (s Grant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantee sets the Grantee field's value. -func (s *Grant) SetGrantee(v *Grantee) *Grant { - s.Grantee = v - return s -} - -// SetPermission sets the Permission field's value. -func (s *Grant) SetPermission(v string) *Grant { - s.Permission = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee -type Grantee struct { - _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Screen name of the grantee. - DisplayName *string `type:"string"` - - // Email address of the grantee. - EmailAddress *string `type:"string"` - - // The canonical user ID of the grantee. - ID *string `type:"string"` - - // Type of grantee - // - // Type is a required field - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` - - // URI of the grantee group. - URI *string `type:"string"` -} - -// String returns the string representation -func (s Grantee) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grantee) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grantee) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grantee"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Grantee) SetDisplayName(v string) *Grantee { - s.DisplayName = &v - return s -} - -// SetEmailAddress sets the EmailAddress field's value. 
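
Grantee.Validate above enforces only Type, so a grant can be assembled from any one of ID, EmailAddress, or URI plus the xsi:type attribute. A small sketch using this package's Type and Permission enum constants; the canonical user ID shown is illustrative:

import "github.com/aws/aws-sdk-go/service/s3"

// buildReadGrant assembles a Grant giving READ to a canonical user.
func buildReadGrant() (*s3.Grant, error) {
	grantee := &s3.Grantee{}
	grantee.SetType(s3.TypeCanonicalUser) // required; Validate fails without it
	grantee.SetID("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be")

	grant := &s3.Grant{}
	grant.SetGrantee(grantee)
	grant.SetPermission(s3.PermissionRead)

	// Validate surfaces missing required fields before any request is made.
	if err := grant.Validate(); err != nil {
		return nil, err
	}
	return grant, nil
}
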
-func (s *Grantee) SetEmailAddress(v string) *Grantee {
-	s.EmailAddress = &v
-	return s
-}
-
-// SetID sets the ID field's value.
-func (s *Grantee) SetID(v string) *Grantee {
-	s.ID = &v
-	return s
-}
-
-// SetType sets the Type field's value.
-func (s *Grantee) SetType(v string) *Grantee {
-	s.Type = &v
-	return s
-}
-
-// SetURI sets the URI field's value.
-func (s *Grantee) SetURI(v string) *Grantee {
-	s.URI = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
-type HeadBucketInput struct {
-	_ struct{} `type:"structure"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s HeadBucketInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s HeadBucketInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *HeadBucketInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
-	s.Bucket = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
-type HeadBucketOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s HeadBucketOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s HeadBucketOutput) GoString() string {
-	return s.String()
-}
-
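Since HeadBucketOutput carries no fields, the operation is effectively an existence and permission probe whose whole signal is the returned error. A sketch of wrapping it, assuming the vendored awserr package; treating "NotFound" as the missing-bucket error code is an assumption about this SDK generation:

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketExists probes a bucket with HeadBucket; only the error matters.
func bucketExists(svc *s3.S3, name string) (bool, error) {
	_, err := svc.HeadBucket((&s3.HeadBucketInput{}).SetBucket(name))
	if err == nil {
		return true, nil
	}
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
		return false, nil
	}
	return false, err
}
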
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
-type HeadObjectInput struct {
-	_ struct{} `type:"structure"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Return the object only if its entity tag (ETag) is the same as the one specified,
-	// otherwise return a 412 (precondition failed).
-	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
-
-	// Return the object only if it has been modified since the specified time,
-	// otherwise return a 304 (not modified).
-	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Return the object only if its entity tag (ETag) is different from the one
-	// specified, otherwise return a 304 (not modified).
-	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
-
-	// Return the object only if it has not been modified since the specified time,
-	// otherwise return a 412 (precondition failed).
-	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Part number of the object being read. This is a positive integer between
-	// 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
-	// Useful for querying the size of the part and the number of parts in this
-	// object.
-	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
-
-	// Downloads the specified range bytes of an object. For more information about
-	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-	Range *string `location:"header" locationName:"Range" type:"string"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s HeadObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s HeadObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *HeadObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
-	s.Bucket = &v
-	return s
-}
-
-// SetIfMatch sets the IfMatch field's value.
-func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
-	s.IfMatch = &v
-	return s
-}
-
-// SetIfModifiedSince sets the IfModifiedSince field's value.
-func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
-	s.IfModifiedSince = &v
-	return s
-}
-
-// SetIfNoneMatch sets the IfNoneMatch field's value.
-func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
-	s.IfNoneMatch = &v
-	return s
-}
-
-// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
-func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput -type HeadObjectOutput struct { - _ struct{} `type:"structure"` - - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. 
- Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s HeadObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. 
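
As the PartNumber documentation above notes, HeadObject can probe a single part of a multipart object, and the output's ContentLength and PartsCount then size the object without downloading it. A sketch under those assumptions; the bucket and key are illustrative:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeFirstPart performs a 'ranged' HEAD against part 1 and reports
// that part's size together with the total part count.
func describeFirstPart(svc *s3.S3) error {
	input := (&s3.HeadObjectInput{}).
		SetBucket("example-bucket").
		SetKey("example-key").
		SetPartNumber(1)

	out, err := svc.HeadObject(input)
	if err != nil {
		return err
	}
	fmt.Printf("part 1: %d bytes of %d parts\n",
		aws.Int64Value(out.ContentLength), aws.Int64Value(out.PartsCount))
	return nil
}
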
-func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { - s.MissingMeta = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
-func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument -type IndexDocument struct { - _ struct{} `type:"structure"` - - // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. - // - // Suffix is a required field - Suffix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s IndexDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IndexDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSuffix sets the Suffix field's value. -func (s *IndexDocument) SetSuffix(v string) *IndexDocument { - s.Suffix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator -type Initiator struct { - _ struct{} `type:"structure"` - - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` -} - -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Initiator) SetDisplayName(v string) *Initiator { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Initiator) SetID(v string) *Initiator { - s.ID = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration -type InventoryConfiguration struct { - _ struct{} `type:"structure"` - - // Contains information about where to publish the inventory results. - // - // Destination is a required field - Destination *InventoryDestination `type:"structure" required:"true"` - - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. 
-	Filter *InventoryFilter `type:"structure"`
-
-	// The ID used to identify the inventory configuration.
-	//
-	// Id is a required field
-	Id *string `type:"string" required:"true"`
-
-	// Specifies which object version(s) to include in the inventory results.
-	//
-	// IncludedObjectVersions is a required field
-	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
-
-	// Specifies whether the inventory is enabled or disabled.
-	//
-	// IsEnabled is a required field
-	IsEnabled *bool `type:"boolean" required:"true"`
-
-	// Contains the optional fields that are included in the inventory results.
-	OptionalFields []*string `locationNameList:"Field" type:"list"`
-
-	// Specifies the schedule for generating inventory results.
-	//
-	// Schedule is a required field
-	Schedule *InventorySchedule `type:"structure" required:"true"`
-}
-
-// String returns the string representation
-func (s InventoryConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s InventoryConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InventoryConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
-	if s.Destination == nil {
-		invalidParams.Add(request.NewErrParamRequired("Destination"))
-	}
-	if s.Id == nil {
-		invalidParams.Add(request.NewErrParamRequired("Id"))
-	}
-	if s.IncludedObjectVersions == nil {
-		invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
-	}
-	if s.IsEnabled == nil {
-		invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
-	}
-	if s.Schedule == nil {
-		invalidParams.Add(request.NewErrParamRequired("Schedule"))
-	}
-	if s.Destination != nil {
-		if err := s.Destination.Validate(); err != nil {
-			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
-		}
-	}
-	if s.Filter != nil {
-		if err := s.Filter.Validate(); err != nil {
-			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
-		}
-	}
-	if s.Schedule != nil {
-		if err := s.Schedule.Validate(); err != nil {
-			invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDestination sets the Destination field's value.
-func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
-	s.Destination = v
-	return s
-}
-
-// SetFilter sets the Filter field's value.
-func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
-	s.Filter = v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
-	s.Id = &v
-	return s
-}
-
-// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
-func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
-	s.IncludedObjectVersions = &v
-	return s
-}
-
-// SetIsEnabled sets the IsEnabled field's value.
-func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
-	s.IsEnabled = &v
-	return s
-}
-
-// SetOptionalFields sets the OptionalFields field's value.
-func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
-	s.OptionalFields = v
-	return s
-}
-
-// SetSchedule sets the Schedule field's value.
-func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { - s.Schedule = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination -type InventoryDestination struct { - _ struct{} `type:"structure"` - - // Contains the bucket name, file format, bucket owner (optional), and prefix - // (optional) where inventory results are published. - // - // S3BucketDestination is a required field - S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` -} - -// String returns the string representation -func (s InventoryDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { - s.S3BucketDestination = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter -type InventoryFilter struct { - _ struct{} `type:"structure"` - - // The prefix that an object must have to be included in the inventory results. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s InventoryFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination -type InventoryS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The ID of the account that owns the destination bucket. - AccountId *string `type:"string"` - - // The Amazon resource name (ARN) of the bucket where inventory results will - // be published. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // Specifies the output format of the inventory results. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"InventoryFormat"` - - // The prefix that is prepended to all inventory results. 
- Prefix *string `type:"string"` -} - -// String returns the string representation -func (s InventoryS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { - s.AccountId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { - s.Bucket = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule -type InventorySchedule struct { - _ struct{} `type:"structure"` - - // Specifies how frequently inventory results are produced. - // - // Frequency is a required field - Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` -} - -// String returns the string representation -func (s InventorySchedule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventorySchedule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventorySchedule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} - if s.Frequency == nil { - invalidParams.Add(request.NewErrParamRequired("Frequency")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFrequency sets the Frequency field's value. -func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { - s.Frequency = &v - return s -} - -// Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter -type KeyFilter struct { - _ struct{} `type:"structure"` - - // A list of containers for key value pair that defines the criteria for the - // filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s KeyFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s KeyFilter) GoString() string { - return s.String() -} - -// SetFilterRules sets the FilterRules field's value. -func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { - s.FilterRules = v - return s -} - -// Container for specifying the AWS Lambda notification configuration. 
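
The inventory types above nest: a configuration owns a destination, schedule, and optional filter, and its Validate recurses into each. A sketch wiring them together with this package's enum constants; the bucket ARN, prefix, and ID are illustrative:

import "github.com/aws/aws-sdk-go/service/s3"

// weeklyInventory assembles a complete, valid InventoryConfiguration.
func weeklyInventory() (*s3.InventoryConfiguration, error) {
	dest := (&s3.InventoryDestination{}).SetS3BucketDestination(
		(&s3.InventoryS3BucketDestination{}).
			SetBucket("arn:aws:s3:::example-inventory-bucket").
			SetFormat(s3.InventoryFormatCsv).
			SetPrefix("inventory/"))

	cfg := &s3.InventoryConfiguration{}
	cfg.SetId("weekly-report")
	cfg.SetIsEnabled(true)
	cfg.SetIncludedObjectVersions(s3.InventoryIncludedObjectVersionsCurrent)
	cfg.SetSchedule((&s3.InventorySchedule{}).SetFrequency(s3.InventoryFrequencyWeekly))
	cfg.SetDestination(dest)

	// Validate recurses into Destination and Schedule as shown above.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
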
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration -type LambdaFunctionConfiguration struct { - _ struct{} `type:"structure"` - - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Filter *NotificationConfigurationFilter `type:"structure"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { - s.Id = &v - return s -} - -// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. -func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { - s.LambdaFunctionArn = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration -type LifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // Rules is a required field - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
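
LambdaFunctionConfiguration.Validate above requires only the event list and the function ARN. A minimal sketch; "s3:ObjectCreated:*" is a standard S3 event name, and the ARN is illustrative:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// lambdaNotification builds the minimal valid configuration: one event
// plus the Lambda function ARN that S3 should invoke.
func lambdaNotification() (*s3.LambdaFunctionConfiguration, error) {
	cfg := &s3.LambdaFunctionConfiguration{}
	cfg.SetEvents([]*string{aws.String("s3:ObjectCreated:*")})
	cfg.SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:example")

	if err := cfg.Validate(); err != nil { // Events and LambdaFunctionArn are required
		return nil, err
	}
	return cfg, nil
}
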
-func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { - s.Rules = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration -type LifecycleExpiration struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` -} - -// String returns the string representation -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleExpiration) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { - s.Date = &v - return s -} - -// SetDays sets the Days field's value. -func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { - s.Days = &v - return s -} - -// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. -func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { - s.ExpiredObjectDeleteMarker = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule -type LifecycleRule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - Expiration *LifecycleExpiration `type:"structure"` - - // The Filter is used to identify objects that a Lifecycle Rule applies to. - // A Filter must have exactly one of Prefix, Tag, or And specified. - Filter *LifecycleRuleFilter `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. 
- NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` - - // Prefix identifying one or more objects to which the rule applies. This is - // deprecated; use Filter instead. - Prefix *string `deprecated:"true" type:"string"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s LifecycleRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { - s.AbortIncompleteMultipartUpload = v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { - s.Expiration = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { - s.Filter = v - return s -} - -// SetID sets the ID field's value. -func (s *LifecycleRule) SetID(v string) *LifecycleRule { - s.ID = &v - return s -} - -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { - s.NoncurrentVersionExpiration = v - return s -} - -// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. -func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { - s.NoncurrentVersionTransitions = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { - s.Prefix = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { - s.Status = &v - return s -} - -// SetTransitions sets the Transitions field's value. -func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { - s.Transitions = v - return s -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or -// more predicates. The Lifecycle Rule will apply to any object matching all -// of the predicates configured inside the And operator. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator -type LifecycleRuleAndOperator struct { - _ struct{} `type:"structure"` - - Prefix *string `type:"string"` - - // All of these tags must exist in the object's tag set in order for the rule - // to apply. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s LifecycleRuleAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRuleAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { - s.Tags = v - return s -} - -// The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter -type LifecycleRuleFilter struct { - _ struct{} `type:"structure"` - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or - // more predicates. The Lifecycle Rule will apply to any object matching all - // of the predicates configured inside the And operator. - And *LifecycleRuleAndOperator `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string"` - - // This tag must exist in the object's tag set in order for the rule to apply. - Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s LifecycleRuleFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRuleFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. 
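
A LifecycleRuleFilter must carry exactly one of Prefix, Tag, or And, so combining a prefix with a tag goes through the And operator, as the comments above describe. A sketch building such a rule; the rule ID, prefix, tag, and seven-day window are illustrative choices:

import "github.com/aws/aws-sdk-go/service/s3"

// expireTempObjects builds a rule whose filter ANDs a prefix with a tag,
// so it matches only objects satisfying both predicates.
func expireTempObjects() (*s3.LifecycleRule, error) {
	and := &s3.LifecycleRuleAndOperator{}
	and.SetPrefix("tmp/")
	and.SetTags([]*s3.Tag{(&s3.Tag{}).SetKey("scratch").SetValue("true")})

	rule := &s3.LifecycleRule{}
	rule.SetID("expire-tmp")
	rule.SetStatus(s3.ExpirationStatusEnabled)
	rule.SetFilter((&s3.LifecycleRuleFilter{}).SetAnd(and))
	rule.SetExpiration((&s3.LifecycleExpiration{}).SetDays(7))

	if err := rule.Validate(); err != nil {
		return nil, err
	}
	return rule, nil
}
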
-func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { - s.Tag = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest -type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket from which analytics configurations are retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { - s.Bucket = &v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput -type ListBucketAnalyticsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - - // The ContinuationToken that represents where this request began. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of analytics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { - s.AnalyticsConfigurationList = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. 
-func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest -type ListBucketInventoryConfigurationsInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the inventory configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker used to continue an inventory configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketInventoryConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketInventoryConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { - s.Bucket = &v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput -type ListBucketInventoryConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string `type:"string"` - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - - // Indicates whether the returned list of inventory configurations is truncated - // in this response. A value of true indicates that the list is truncated. - IsTruncated *bool `type:"boolean"` - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. 
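
The IsTruncated / NextContinuationToken pair described above is the listing protocol shared by all of these configuration-listing operations. A sketch of the resulting loop for inventory configurations:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// allInventoryConfigs pages through the listing: pass NextContinuationToken
// back on each request until IsTruncated is no longer true.
func allInventoryConfigs(svc *s3.S3, bucket string) ([]*s3.InventoryConfiguration, error) {
	var configs []*s3.InventoryConfiguration
	var token *string
	for {
		out, err := svc.ListBucketInventoryConfigurations(&s3.ListBucketInventoryConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return nil, err
		}
		configs = append(configs, out.InventoryConfigurationList...)
		if !aws.BoolValue(out.IsTruncated) {
			return configs, nil
		}
		token = out.NextContinuationToken
	}
}
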
- NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketInventoryConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { - s.InventoryConfigurationList = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest -type ListBucketMetricsConfigurationsInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the metrics configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker that is used to continue a metrics configuration listing that - // has been truncated. Use the NextContinuationToken from a previously truncated - // list response to continue the listing. The continuation token is an opaque - // value that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketMetricsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketMetricsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketMetricsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { - s.Bucket = &v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput -type ListBucketMetricsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The marker that is used as a starting point for this metrics configuration - // list response. 
This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of metrics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The list of metrics configurations for a bucket. - MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` - - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketMetricsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketMetricsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { - s.MetricsConfigurationList = v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput -type ListBucketsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsInput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput -type ListBucketsOutput struct { - _ struct{} `type:"structure"` - - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsOutput) GoString() string { - return s.String() -} - -// SetBuckets sets the Buckets field's value. -func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { - s.Buckets = v - return s -} - -// SetOwner sets the Owner field's value. 
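// Illustrative sketch (aws-sdk-go v1): ListBuckets takes an empty input and returns the
// owner plus every bucket owned by the caller; the region is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		// Each Bucket carries a name and creation date.
		fmt.Println(aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
	}
}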
-func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { - s.Owner = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest -type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. - UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListMultipartUploadsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { - s.Bucket = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { - s.EncodingType = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. 
-func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { - s.MaxUploads = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { - s.UploadIdMarker = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` - - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` - - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListMultipartUploadsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { - s.Bucket = &v - return s -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { - s.CommonPrefixes = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. 
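// Illustrative sketch (aws-sdk-go v1): the generated ListMultipartUploadsPages helper
// drives the KeyMarker/NextKeyMarker and UploadIdMarker/NextUploadIdMarker pagination
// described above, calling the callback once per page. Bucket and region are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("example-bucket"),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			// Each in-progress upload is identified by key plus upload ID.
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep fetching pages
	})
	if err != nil {
		log.Fatal(err)
	}
}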
-func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { - s.MaxUploads = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.NextUploadIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.UploadIdMarker = &v - return s -} - -// SetUploads sets the Uploads field's value. -func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { - s.Uploads = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest -type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. - VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListObjectVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { - s.Bucket = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { - s.EncodingType = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { - s.VersionIdMarker = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput -type ListObjectVersionsOutput struct { - _ struct{} `type:"structure"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last Key returned in a truncated response. - KeyMarker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // Use this value for the key marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // Use this value for the next version id marker parameter in a subsequent request. - NextVersionIdMarker *string `type:"string"` - - Prefix *string `type:"string"` - - VersionIdMarker *string `type:"string"` - - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListObjectVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { - s.CommonPrefixes = v - return s -} - -// SetDeleteMarkers sets the DeleteMarkers field's value. 
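// Illustrative sketch (aws-sdk-go v1): paginating object versions; the SDK resumes each
// page from NextKeyMarker/NextVersionIdMarker as described above. Names are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"),
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s %s latest=%t\n", aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
		}
		for _, d := range page.DeleteMarkers {
			// Delete markers are reported separately from real versions.
			fmt.Printf("%s %s (delete marker)\n", aws.StringValue(d.Key), aws.StringValue(d.VersionId))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}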
-func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { - s.DeleteMarkers = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { - s.Name = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { - s.NextVersionIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { - s.VersionIdMarker = &v - return s -} - -// SetVersions sets the Versions field's value. -func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { - s.Versions = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest -type ListObjectsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. 
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s ListObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { - s.Bucket = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { - s.EncodingType = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { - s.RequestPayer = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput -type ListObjectsOutput struct { - _ struct{} `type:"structure"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Contents []*Object `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` - - Marker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // When the response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as the marker in the subsequent - // request to get the next set of objects. Amazon S3 lists objects in alphabetical - // order. Note: this element is returned only if the delimiter request parameter - // is specified. If the response does not include NextMarker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys.
- NextMarker *string `type:"string"` - - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s ListObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { - s.Contents = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { - s.IsTruncated = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { - s.Name = &v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { - s.NextMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request -type ListObjectsV2Input struct { - _ struct{} `type:"structure"` - - // Name of the bucket to list. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true - FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. 
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request in V2 style. Bucket owners need not specify this parameter - // in their requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` -} - -// String returns the string representation -func (s ListObjectsV2Input) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsV2Input) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsV2Input) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { - s.Bucket = &v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { - s.EncodingType = &v - return s -} - -// SetFetchOwner sets the FetchOwner field's value. -func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { - s.FetchOwner = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { - s.RequestPayer = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { - s.StartAfter = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output -type ListObjectsV2Output struct { - _ struct{} `type:"structure"` - - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `type:"string"` - - // A delimiter is a character you use to group keys. 
- Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` - - // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than or equal to the MaxKeys field; for example, if you ask - // for 50 keys, your result will include at most 50 keys. - KeyCount *int64 `type:"integer"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `type:"integer"` - - // Name of the bucket to list. - Name *string `type:"string"` - - // NextContinuationToken is sent when IsTruncated is true, which means there - // are more keys in the bucket that can be listed. The next list request to - // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken - // is obfuscated and is not a real key. - NextContinuationToken *string `type:"string"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `type:"string"` - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. - StartAfter *string `type:"string"` -} - -// String returns the string representation -func (s ListObjectsV2Output) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsV2Output) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { - s.Contents = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { - s.IsTruncated = &v - return s -} - -// SetKeyCount sets the KeyCount field's value. -func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { - s.KeyCount = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { - s.Name = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { - s.NextContinuationToken = &v - return s -} - -// SetPrefix sets the Prefix field's value.
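// Illustrative sketch (aws-sdk-go v1): building a ListObjectsV2 request with the chained
// Set* methods generated in this file, then letting ListObjectsV2Pages follow
// NextContinuationToken automatically. Bucket, prefix, and region are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	// The generated setters return the receiver, so calls chain.
	input := new(s3.ListObjectsV2Input).
		SetBucket("example-bucket").
		SetPrefix("logs/").
		SetMaxKeys(100)
	err := svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // continue until IsTruncated is false
	})
	if err != nil {
		log.Fatal(err)
	}
}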
-func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { - s.Prefix = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { - s.StartAfter = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest -type ListPartsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID identifying the multipart upload whose parts are being listed. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s ListPartsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ListPartsInput) SetKey(v string) *ListPartsInput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { - s.MaxParts = &v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { - s.PartNumberMarker = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
-func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput -type ListPartsOutput struct { - _ struct{} `type:"structure"` - - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` - - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - Owner *Owner `type:"structure"` - - // Part number after which listing begins. - PartNumberMarker *int64 `type:"integer"` - - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { - s.Bucket = &v - return s -} - -// SetInitiator sets the Initiator field's value. -func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { - s.Initiator = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { - s.IsTruncated = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { - s.MaxParts = &v - return s -} - -// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. 
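// Illustrative sketch (aws-sdk-go v1): paginating the parts of one multipart upload via
// PartNumberMarker/NextPartNumberMarker as described above. The bucket, key, and upload
// ID are placeholders (a real upload ID comes from CreateMultipartUpload).
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("big-object"),
		UploadId: aws.String("example-upload-id"),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d: %d bytes\n", aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}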
-func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { - s.NextPartNumberMarker = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { - s.Owner = v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { - s.PartNumberMarker = &v - return s -} - -// SetParts sets the Parts field's value. -func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { - s.Parts = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { - s.RequestCharged = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled -type LoggingEnabled struct { - _ struct{} `type:"structure"` - - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` - - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` -} - -// String returns the string representation -func (s LoggingEnabled) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LoggingEnabled) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTargetBucket sets the TargetBucket field's value. -func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { - s.TargetBucket = &v - return s -} - -// SetTargetGrants sets the TargetGrants field's value. -func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { - s.TargetGrants = v - return s -} - -// SetTargetPrefix sets the TargetPrefix field's value. -func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { - s.TargetPrefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator -type MetricsAndOperator struct { - _ struct{} `type:"structure"` - - // The prefix used when evaluating an AND predicate. - Prefix *string `type:"string"` - - // The list of tags used when evaluating an AND predicate. 
- Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s MetricsAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { - s.Tags = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration -type MetricsConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies a metrics configuration filter. The metrics configuration will - // only include objects that meet the filter's criteria. A filter must be a - // prefix, a tag, or a conjunction (MetricsAndOperator). - Filter *MetricsFilter `type:"structure"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s MetricsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter -type MetricsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *MetricsAndOperator `type:"structure"` - - // The prefix used when evaluating a metrics filter. - Prefix *string `type:"string"` - - // The tag used when evaluating a metrics filter. 
- Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s MetricsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { - s.Tag = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload -type MultipartUpload struct { - _ struct{} `type:"structure"` - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - Owner *Owner `type:"structure"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MultipartUpload) GoString() string { - return s.String() -} - -// SetInitiated sets the Initiated field's value. -func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { - s.Initiated = &v - return s -} - -// SetInitiator sets the Initiator field's value. -func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { - s.Initiator = v - return s -} - -// SetKey sets the Key field's value. -func (s *MultipartUpload) SetKey(v string) *MultipartUpload { - s.Key = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { - s.Owner = v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { - s.UploadId = &v - return s -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration -type NoncurrentVersionExpiration struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) - NoncurrentDays *int64 `type:"integer"` -} - -// String returns the string representation -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { - s.NoncurrentDays = &v - return s -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) - NoncurrentDays *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { - return s.String() -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { - s.NoncurrentDays = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { - s.StorageClass = &v - return s -} - -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration -type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. -func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { - s.LambdaFunctionConfigurations = v - return s -} - -// SetQueueConfigurations sets the QueueConfigurations field's value. -func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { - s.QueueConfigurations = v - return s -} - -// SetTopicConfigurations sets the TopicConfigurations field's value. -func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { - s.TopicConfigurations = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated -type NotificationConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` - - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` - - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
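// Illustrative sketch (aws-sdk-go v1): assembling a NotificationConfiguration, running the
// generated Validate (which enforces the required Events and QueueArn), and applying it
// with PutBucketNotificationConfiguration. The bucket name and SQS ARN are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	cfg := &s3.NotificationConfiguration{
		QueueConfigurations: []*s3.QueueConfiguration{{
			Events:   []*string{aws.String("s3:ObjectCreated:*")},
			QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
		}},
	}
	// Validate locally before making the request, as the SDK does on send.
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String("example-bucket"),
		NotificationConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}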
-func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { - s.CloudFunctionConfiguration = v - return s -} - -// SetQueueConfiguration sets the QueueConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.QueueConfiguration = v - return s -} - -// SetTopicConfiguration sets the TopicConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.TopicConfiguration = v - return s -} - -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter -type NotificationConfigurationFilter struct { - _ struct{} `type:"structure"` - - // Container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationFilter) GoString() string { - return s.String() -} - -// SetKey sets the Key field's value. -func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { - s.Key = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object -type Object struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - Key *string `min:"1" type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` -} - -// String returns the string representation -func (s Object) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Object) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *Object) SetETag(v string) *Object { - s.ETag = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Object) SetKey(v string) *Object { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Object) SetLastModified(v time.Time) *Object { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *Object) SetOwner(v *Owner) *Object { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *Object) SetSize(v int64) *Object { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Object) SetStorageClass(v string) *Object { - s.StorageClass = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier -type ObjectIdentifier struct { - _ struct{} `type:"structure"` - - // Key name of the object to delete. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // VersionId for the specific version of the object to delete. 
- VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectIdentifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectIdentifier) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ObjectIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion -type ObjectVersion struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectVersion) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *ObjectVersion) SetETag(v string) *ObjectVersion { - s.ETag = &v - return s -} - -// SetIsLatest sets the IsLatest field's value. -func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ObjectVersion) SetKey(v string) *ObjectVersion { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
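ObjectIdentifier, defined above, is the element type of the batch DeleteObjects API. A minimal sketch, with placeholder bucket, keys, and version ID:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Batch-delete two keys; the second targets a specific version.
	input := &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2019-06-12.txt")},
				{Key: aws.String("logs/old.txt"), VersionId: aws.String("placeholder-version-id")},
			},
			Quiet: aws.Bool(true), // report only errors in the response
		},
	}
	if _, err := svc.DeleteObjects(input); err != nil {
		fmt.Println("delete failed:", err)
	}
}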
-func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner -type Owner struct { - _ struct{} `type:"structure"` - - DisplayName *string `type:"string"` - - ID *string `type:"string"` -} - -// String returns the string representation -func (s Owner) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Owner) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Owner) SetDisplayName(v string) *Owner { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Owner) SetID(v string) *Owner { - s.ID = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part -type Part struct { - _ struct{} `type:"structure"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `type:"integer"` - - // Size of the uploaded part data. - Size *int64 `type:"integer"` -} - -// String returns the string representation -func (s Part) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Part) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *Part) SetETag(v string) *Part { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Part) SetLastModified(v time.Time) *Part { - s.LastModified = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *Part) SetPartNumber(v int64) *Part { - s.PartNumber = &v - return s -} - -// SetSize sets the Size field's value. -func (s *Part) SetSize(v int64) *Part { - s.Size = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest -type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` - - // Specifies the Accelerate Configuration you want to set for the bucket. - // - // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Name of the bucket for which the accelerate configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. -func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { - s.AccelerateConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput -type PutBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest -type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -} - -// String returns the string representation -func (s PutBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
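A minimal sketch of the accelerate call whose input type is defined above; both required fields are set, and the bucket name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable Transfer Acceleration; Status takes the
	// BucketAccelerateStatus enum ("Enabled" or "Suspended").
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		fmt.Println("accelerate config failed:", err)
	}
}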
-func (s *PutBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { - s.Bucket = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { - s.GrantWriteACP = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput -type PutBucketAclOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest -type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - // - // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The name of the bucket to which an analytics configuration is stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
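PutBucketAclInput above supports either a canned ACL or explicit Grant* headers. A sketch of the simpler canned-ACL form, with a placeholder bucket:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Apply a canned ACL; the Grant* header fields would be used
	// instead for fine-grained grantee control.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		fmt.Println("put ACL failed:", err)
	}
}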
-func (s *PutBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} - if s.AnalyticsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.AnalyticsConfiguration != nil { - if err := s.AnalyticsConfiguration.Validate(); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { - s.AnalyticsConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput -type PutBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest -type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) - } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { - s.Bucket = &v - return s -} - -// SetCORSConfiguration sets the CORSConfiguration field's value. 
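A sketch of a minimal CORS rule for the input type above; AllowedMethods and AllowedOrigins are the required pieces of each CORSRule, and the values shown are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Allow cross-origin GETs from any origin.
	input := &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: []*string{aws.String("GET")},
				AllowedOrigins: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	}
	if _, err := svc.PutBucketCors(input); err != nil {
		fmt.Println("put CORS failed:", err)
	}
}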
-func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { - s.CORSConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput -type PutBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest -type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` - - // The name of the bucket where the inventory configuration will be stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. - // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.InventoryConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) - } - if s.InventoryConfiguration != nil { - if err := s.InventoryConfiguration.Validate(); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -// SetInventoryConfiguration sets the InventoryConfiguration field's value. 
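A sketch of a complete inventory configuration, since every nested field (Destination, Id, IncludedObjectVersions, IsEnabled, Schedule) is required by the SDK's validation; bucket ARNs and the configuration ID are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Weekly CSV inventory of all object versions, written to a
	// second bucket. The Id appears both in the query string and in
	// the configuration body, matching the API shape above.
	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-bucket"),     // placeholder
		Id:     aws.String("weekly-inventory"),   // placeholder
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("weekly-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsAll),
			Schedule: &s3.InventorySchedule{
				Frequency: aws.String(s3.InventoryFrequencyWeekly),
			},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::example-inventory-dest"),
					Format: aws.String(s3.InventoryFormatCsv),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("put inventory config failed:", err)
	}
}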
-func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { - s.InventoryConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput -type PutBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { - s.Bucket = &v - return s -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
-func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { - s.LifecycleConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput -type PutBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest -type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { - s.Bucket = &v - return s -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
-func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { - s.LifecycleConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput -type PutBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest -type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.BucketLoggingStatus == nil { - invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) - } - if s.BucketLoggingStatus != nil { - if err := s.BucketLoggingStatus.Validate(); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { - s.Bucket = &v - return s -} - -// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. -func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { - s.BucketLoggingStatus = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput -type PutBucketLoggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest -type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` - - // The name of the bucket for which the metrics configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the metrics configuration. 
- // - // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.MetricsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) - } - if s.MetricsConfiguration != nil { - if err := s.MetricsConfiguration.Validate(); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -// SetMetricsConfiguration sets the MetricsConfiguration field's value. -func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { - s.MetricsConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput -type PutBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest -type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { - s.Bucket = &v - return s -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { - s.NotificationConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput -type PutBucketNotificationConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest -type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { - s.Bucket = &v - return s -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
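Tying the pieces together, a sketch of the non-deprecated notification call defined above; the SNS topic ARN and bucket name are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Publish to SNS on object removals. Sending an empty
	// NotificationConfiguration would instead turn notifications off,
	// per the doc comment above.
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			TopicConfigurations: []*s3.TopicConfiguration{{
				Events:   []*string{aws.String("s3:ObjectRemoved:*")},
				TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
			}},
		},
	})
	if err != nil {
		fmt.Println("put notification failed:", err)
	}
}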
-func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { - s.NotificationConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput -type PutBucketNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest -type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The bucket policy as a JSON document. - // - // Policy is a required field - Policy *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { - s.Bucket = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { - s.Policy = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput -type PutBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest -type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - // - // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
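The Policy field above carries the raw JSON document; nothing in the SDK parses it client-side. A sketch with an illustrative public-read policy, where the bucket and resource names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The policy travels as an opaque JSON string in the request payload.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::example-bucket/*"
	  }]
	}`
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Policy: aws.String(policy),
	})
	if err != nil {
		fmt.Println("put policy failed:", err)
	}
}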
-func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) - } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { - s.Bucket = &v - return s -} - -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { - s.ReplicationConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput -type PutBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest -type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) - } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { - s.Bucket = &v - return s -} - -// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
-func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { - s.RequestPaymentConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput -type PutBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest -type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { - s.Bucket = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { - s.Tagging = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput -type PutBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest -type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. 
- MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.VersioningConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { - s.Bucket = &v - return s -} - -// SetMFA sets the MFA field's value. -func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { - s.MFA = &v - return s -} - -// SetVersioningConfiguration sets the VersioningConfiguration field's value. -func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { - s.VersioningConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput -type PutBucketVersioningOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest -type PutBucketWebsiteInput struct { - _ struct{} `type:"structure" payload:"WebsiteConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
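A sketch of the versioning call, including the MFA header format documented above (device serial, a space, then the current token code); the serial, code, and bucket name are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status:    aws.String(s3.BucketVersioningStatusEnabled),
			MFADelete: aws.String(s3.MFADeleteEnabled),
		},
	}
	// Device serial and current token code, separated by a space,
	// per the field documentation above. Both values are placeholders.
	input.SetMFA("arn:aws:iam::123456789012:mfa/root-account-mfa-device 123456")

	if _, err := svc.PutBucketVersioning(input); err != nil {
		fmt.Println("put versioning failed:", err)
	}
}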
-func (s *PutBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.WebsiteConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) - } - if s.WebsiteConfiguration != nil { - if err := s.WebsiteConfiguration.Validate(); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { - s.Bucket = &v - return s -} - -// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. -func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { - s.WebsiteConfiguration = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput -type PutBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest -type PutObjectAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { - s.Bucket = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { - s.VersionId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput -type PutObjectAclOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
-type PutObjectAclOutput struct {
-	_ struct{} `type:"structure"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s PutObjectAclOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectAclOutput) GoString() string {
-	return s.String()
-}
-
-// SetRequestCharged sets the RequestCharged field's value.
-func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
-	s.RequestCharged = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
-type PutObjectInput struct {
-	_ struct{} `type:"structure" payload:"Body"`
-
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	// Object data.
-	Body io.ReadSeeker `type:"blob"`
-
-	// Name of the bucket to which the PUT operation was initiated.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-	// Size of the body in bytes. This parameter is useful when the size of the
-	// body cannot be determined automatically.
-	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
-
-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
-
-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
-
-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
-
-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-
-	// Object key for which the PUT operation was initiated.
-	//
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-	// The tag-set for the object. The tag-set must be encoded as URL Query parameters
-	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
-
-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
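A sketch of the basic upload path with the input type above, including the client-side Validate pass. Assumes the svc client from the first sketch plus the standard bytes and log packages; all names are placeholders.

func putObjectExample(svc *s3.S3) error {
	put := &s3.PutObjectInput{
		Bucket:      aws.String("my-bucket"),
		Key:         aws.String("greeting.txt"),
		Body:        bytes.NewReader([]byte("hello")), // *bytes.Reader satisfies io.ReadSeeker
		ContentType: aws.String("text/plain"),
	}
	// Validate runs the same required-field checks shown above, before any HTTP call.
	if err := put.Validate(); err != nil {
		return err
	}
	out, err := svc.PutObject(put)
	if err != nil {
		return err
	}
	// The output carries the ETag and, on a versioned bucket, the new VersionId.
	log.Printf("etag=%s version=%s", aws.StringValue(out.ETag), aws.StringValue(out.VersionId))
	return nil
}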
-// SetACL sets the ACL field's value.
-func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
-	s.ACL = &v
-	return s
-}
-
-// SetBody sets the Body field's value.
-func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
-	s.Body = v
-	return s
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
-	s.Bucket = &v
-	return s
-}
-
-// SetCacheControl sets the CacheControl field's value.
-func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
-	s.CacheControl = &v
-	return s
-}
-
-// SetContentDisposition sets the ContentDisposition field's value.
-func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
-	s.ContentDisposition = &v
-	return s
-}
-
-// SetContentEncoding sets the ContentEncoding field's value.
-func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
-	s.ContentEncoding = &v
-	return s
-}
-
-// SetContentLanguage sets the ContentLanguage field's value.
-func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
-	s.ContentLanguage = &v
-	return s
-}
-
-// SetContentLength sets the ContentLength field's value.
-func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
-	s.ContentLength = &v
-	return s
-}
-
-// SetContentType sets the ContentType field's value.
-func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
-	s.ContentType = &v
-	return s
-}
-
-// SetExpires sets the Expires field's value.
-func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
-	s.Expires = &v
-	return s
-}
-
-// SetGrantFullControl sets the GrantFullControl field's value.
-func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
-	s.GrantFullControl = &v
-	return s
-}
-
-// SetGrantRead sets the GrantRead field's value.
-func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
-	s.GrantRead = &v
-	return s
-}
-
-// SetGrantReadACP sets the GrantReadACP field's value.
-func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
-	s.GrantReadACP = &v
-	return s
-}
-
-// SetGrantWriteACP sets the GrantWriteACP field's value.
-func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
-	s.GrantWriteACP = &v
-	return s
-}
-
-// SetKey sets the Key field's value.
-func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
-	s.Key = &v
-	return s
-}
-
-// SetMetadata sets the Metadata field's value.
-func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
-	s.Metadata = v
-	return s
-}
-
-// SetRequestPayer sets the RequestPayer field's value.
-func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
-	s.RequestPayer = &v
-	return s
-}
-
-// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
-func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
-	s.SSECustomerAlgorithm = &v
-	return s
-}
-
-// SetSSECustomerKey sets the SSECustomerKey field's value.
-func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
-	s.SSECustomerKey = &v
-	return s
-}
-
-// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
-func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
-	s.SSECustomerKeyMD5 = &v
-	return s
-}
-
-// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
-func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
-	s.SSEKMSKeyId = &v
-	return s
-}
-
-// SetServerSideEncryption sets the ServerSideEncryption field's value.
-func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
-	s.ServerSideEncryption = &v
-	return s
-}
-
-// SetStorageClass sets the StorageClass field's value.
-func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
-	s.StorageClass = &v
-	return s
-}
-
-// SetTagging sets the Tagging field's value.
-func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
-	s.Tagging = &v
-	return s
-}
-
-// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
-func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
-	s.WebsiteRedirectLocation = &v
-	return s
-}
-
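Because every generated setter returns the receiver, optional fields can be set fluently. A sketch of an SSE-KMS upload using the setters above; the KMS key ARN is a placeholder assumption, and the svc client and imports come from the first sketch.

func putEncryptedExample(svc *s3.S3, payload []byte) error {
	// Chained setters read naturally when many optional headers are involved.
	put := new(s3.PutObjectInput).
		SetBucket("my-bucket").
		SetKey("secret.bin").
		SetBody(bytes.NewReader(payload)).
		SetServerSideEncryption(s3.ServerSideEncryptionAwsKms).
		SetSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE") // placeholder ARN
	_, err := svc.PutObject(put)
	return err
}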
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
-type PutObjectOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Entity tag for the uploaded object.
-	ETag *string `location:"header" locationName:"ETag" type:"string"`
-
-	// If the object expiration is configured, this will contain the expiration
-	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// Version of the object.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectOutput) GoString() string {
-	return s.String()
-}
-
-// SetETag sets the ETag field's value.
-func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
-	s.ETag = &v
-	return s
-}
-
-// SetExpiration sets the Expiration field's value.
-func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
-	s.Expiration = &v
-	return s
-}
-
-// SetRequestCharged sets the RequestCharged field's value.
-func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
-	s.RequestCharged = &v
-	return s
-}
-
-// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
-func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
-	s.SSECustomerAlgorithm = &v
-	return s
-}
-
-// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
-func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
-	s.SSECustomerKeyMD5 = &v
-	return s
-}
-
-// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
-func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
-	s.SSEKMSKeyId = &v
-	return s
-}
-
-// SetServerSideEncryption sets the ServerSideEncryption field's value.
-func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
-	s.ServerSideEncryption = &v
-	return s
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
-	s.VersionId = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
-type PutObjectTaggingInput struct {
-	_ struct{} `type:"structure" payload:"Tagging"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Tagging is a required field
-	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
-
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectTaggingInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectTaggingInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutObjectTaggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.Tagging == nil {
-		invalidParams.Add(request.NewErrParamRequired("Tagging"))
-	}
-	if s.Tagging != nil {
-		if err := s.Tagging.Validate(); err != nil {
-			invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
-	s.Bucket = &v
-	return s
-}
-
-// SetKey sets the Key field's value.
-func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
-	s.Key = &v
-	return s
-}
-
-// SetTagging sets the Tagging field's value.
-func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
-	s.Tagging = v
-	return s
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
-	s.VersionId = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
-type PutObjectTaggingOutput struct {
-	_ struct{} `type:"structure"`
-
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectTaggingOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectTaggingOutput) GoString() string {
-	return s.String()
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
-	s.VersionId = &v
-	return s
-}
-
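A sketch of replacing an object's tag set with the input type above, built from the Tagging and Tag types defined further down in this file. Assumes the svc client from the first sketch; tag names are placeholders.

func putTagsExample(svc *s3.S3) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("greeting.txt"),
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("env"), Value: aws.String("staging")},
		}},
	})
	return err
}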
-// Container for specifying a configuration when you want Amazon S3 to publish
-// events to an Amazon Simple Queue Service (Amazon SQS) queue.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
-type QueueConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Events is a required field
-	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
-
-	// Container for object key name filtering rules. For information about key
-	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	Filter *NotificationConfigurationFilter `type:"structure"`
-
-	// Optional unique identifier for configurations in a notification configuration.
-	// If you don't provide one, Amazon S3 will assign an ID.
-	Id *string `type:"string"`
-
-	// Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
-	// events of specified type.
-	//
-	// QueueArn is a required field
-	QueueArn *string `locationName:"Queue" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s QueueConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s QueueConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *QueueConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
-	if s.Events == nil {
-		invalidParams.Add(request.NewErrParamRequired("Events"))
-	}
-	if s.QueueArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("QueueArn"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetEvents sets the Events field's value.
-func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
-	s.Events = v
-	return s
-}
-
-// SetFilter sets the Filter field's value.
-func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
-	s.Filter = v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
-	s.Id = &v
-	return s
-}
-
-// SetQueueArn sets the QueueArn field's value.
-func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
-	s.QueueArn = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
-type QueueConfigurationDeprecated struct {
-	_ struct{} `type:"structure"`
-
-	// Bucket event for which to send notifications.
-	Event *string `deprecated:"true" type:"string" enum:"Event"`
-
-	Events []*string `locationName:"Event" type:"list" flattened:"true"`
-
-	// Optional unique identifier for configurations in a notification configuration.
-	// If you don't provide one, Amazon S3 will assign an ID.
-	Id *string `type:"string"`
-
-	Queue *string `type:"string"`
-}
-
-// String returns the string representation
-func (s QueueConfigurationDeprecated) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s QueueConfigurationDeprecated) GoString() string {
-	return s.String()
-}
-
-// SetEvent sets the Event field's value.
-func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
-	s.Event = &v
-	return s
-}
-
-// SetEvents sets the Events field's value.
-func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
-	s.Events = v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
-	s.Id = &v
-	return s
-}
-
-// SetQueue sets the Queue field's value.
-func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
-	s.Queue = &v
-	return s
-}
-
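A sketch of where the non-deprecated QueueConfiguration above plugs in: it rides inside the notification configuration written by PutBucketNotificationConfiguration. The queue ARN is a placeholder assumption; svc comes from the first sketch.

func notifyQueueExample(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("my-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:my-queue"), // placeholder
				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
			}},
		},
	})
	return err
}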
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
-type Redirect struct {
-	_ struct{} `type:"structure"`
-
-	// The host name to use in the redirect request.
-	HostName *string `type:"string"`
-
-	// The HTTP redirect code to use on the response. Not required if one of the
-	// siblings is present.
-	HttpRedirectCode *string `type:"string"`
-
-	// Protocol to use (http, https) when redirecting requests. The default is the
-	// protocol that is used in the original request.
-	Protocol *string `type:"string" enum:"Protocol"`
-
-	// The object key prefix to use in the redirect request. For example, to redirect
-	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
-	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
-	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
-	// if one of the siblings is present. Can be present only if ReplaceKeyWith
-	// is not provided.
-	ReplaceKeyPrefixWith *string `type:"string"`
-
-	// The specific object key to use in the redirect request. For example, redirect
-	// request to error.html. Not required if one of the siblings is present. Can
-	// be present only if ReplaceKeyPrefixWith is not provided.
-	ReplaceKeyWith *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Redirect) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Redirect) GoString() string {
-	return s.String()
-}
-
-// SetHostName sets the HostName field's value.
-func (s *Redirect) SetHostName(v string) *Redirect {
-	s.HostName = &v
-	return s
-}
-
-// SetHttpRedirectCode sets the HttpRedirectCode field's value.
-func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
-	s.HttpRedirectCode = &v
-	return s
-}
-
-// SetProtocol sets the Protocol field's value.
-func (s *Redirect) SetProtocol(v string) *Redirect {
-	s.Protocol = &v
-	return s
-}
-
-// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
-func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
-	s.ReplaceKeyPrefixWith = &v
-	return s
-}
-
-// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
-func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
-	s.ReplaceKeyWith = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
-type RedirectAllRequestsTo struct {
-	_ struct{} `type:"structure"`
-
-	// Name of the host where requests will be redirected.
-	//
-	// HostName is a required field
-	HostName *string `type:"string" required:"true"`
-
-	// Protocol to use (http, https) when redirecting requests. The default is the
-	// protocol that is used in the original request.
-	Protocol *string `type:"string" enum:"Protocol"`
-}
-
-// String returns the string representation
-func (s RedirectAllRequestsTo) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RedirectAllRequestsTo) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RedirectAllRequestsTo) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
-	if s.HostName == nil {
-		invalidParams.Add(request.NewErrParamRequired("HostName"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetHostName sets the HostName field's value.
-func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
-	s.HostName = &v
-	return s
-}
-
-// SetProtocol sets the Protocol field's value.
-func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
-	s.Protocol = &v
-	return s
-}
-
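A sketch of RedirectAllRequestsTo in context: it replaces the per-rule routing of a website configuration and sends every request to one host. Host and bucket names are placeholders; svc comes from the first sketch.

func redirectAllExample(svc *s3.S3) error {
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("old-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
				HostName: aws.String("www.example.com"),
				Protocol: aws.String(s3.ProtocolHttps),
			},
		},
	})
	return err
}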
-// Container for replication rules. You can add as many as 1,000 rules. Total
-// replication configuration size can be up to 2 MB.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
-type ReplicationConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
-	// the objects.
-	//
-	// Role is a required field
-	Role *string `type:"string" required:"true"`
-
-	// Container for information about a particular replication rule. Replication
-	// configuration must have at least one rule and can contain up to 1,000 rules.
-	//
-	// Rules is a required field
-	Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
-}
-
-// String returns the string representation
-func (s ReplicationConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ReplicationConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
-	if s.Role == nil {
-		invalidParams.Add(request.NewErrParamRequired("Role"))
-	}
-	if s.Rules == nil {
-		invalidParams.Add(request.NewErrParamRequired("Rules"))
-	}
-	if s.Rules != nil {
-		for i, v := range s.Rules {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetRole sets the Role field's value.
-func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
-	s.Role = &v
-	return s
-}
-
-// SetRules sets the Rules field's value.
-func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
-	s.Rules = v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule
-type ReplicationRule struct {
-	_ struct{} `type:"structure"`
-
-	// Destination is a required field
-	Destination *Destination `type:"structure" required:"true"`
-
-	// Unique identifier for the rule. The value cannot be longer than 255 characters.
-	ID *string `type:"string"`
-
-	// Object key name prefix identifying one or more objects to which the rule applies.
-	// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
-	// are not supported.
-	//
-	// Prefix is a required field
-	Prefix *string `type:"string" required:"true"`
-
-	// The rule is ignored if status is not Enabled.
-	//
-	// Status is a required field
-	Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
-}
-
-// String returns the string representation
-func (s ReplicationRule) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ReplicationRule) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationRule) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
-	if s.Destination == nil {
-		invalidParams.Add(request.NewErrParamRequired("Destination"))
-	}
-	if s.Prefix == nil {
-		invalidParams.Add(request.NewErrParamRequired("Prefix"))
-	}
-	if s.Status == nil {
-		invalidParams.Add(request.NewErrParamRequired("Status"))
-	}
-	if s.Destination != nil {
-		if err := s.Destination.Validate(); err != nil {
-			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDestination sets the Destination field's value.
-func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
-	s.Destination = v
-	return s
-}
-
-// SetID sets the ID field's value.
-func (s *ReplicationRule) SetID(v string) *ReplicationRule {
-	s.ID = &v
-	return s
-}
-
-// SetPrefix sets the Prefix field's value.
-func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
-	s.Prefix = &v
-	return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
-	s.Status = &v
-	return s
-}
-
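A sketch tying ReplicationConfiguration and ReplicationRule together, with a Validate pass that exercises the required Role, Rules, and per-rule Destination/Prefix/Status checks above. Both ARNs are placeholder assumptions; svc comes from the first sketch.

func replicationExample(svc *s3.S3) error {
	cfg := &s3.ReplicationConfiguration{
		Role: aws.String("arn:aws:iam::111122223333:role/replication-role"), // placeholder
		Rules: []*s3.ReplicationRule{{
			Prefix:      aws.String("logs/"),
			Status:      aws.String(s3.ReplicationRuleStatusEnabled),
			Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::backup-bucket")},
		}},
	}
	// Catch missing required fields locally before the call.
	if err := cfg.Validate(); err != nil {
		return err
	}
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket:                   aws.String("my-bucket"),
		ReplicationConfiguration: cfg,
	})
	return err
}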
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
-type RequestPaymentConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies who pays for the download and request fees.
-	//
-	// Payer is a required field
-	Payer *string `type:"string" required:"true" enum:"Payer"`
-}
-
-// String returns the string representation
-func (s RequestPaymentConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RequestPaymentConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RequestPaymentConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
-	if s.Payer == nil {
-		invalidParams.Add(request.NewErrParamRequired("Payer"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetPayer sets the Payer field's value.
-func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
-	s.Payer = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest
-type RestoreObjectInput struct {
-	_ struct{} `type:"structure" payload:"RestoreRequest"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
-
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s RestoreObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RestoreObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.RestoreRequest != nil {
-		if err := s.RestoreRequest.Validate(); err != nil {
-			invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
-	s.Bucket = &v
-	return s
-}
-
-// SetKey sets the Key field's value.
-func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
-	s.Key = &v
-	return s
-}
-
-// SetRequestPayer sets the RequestPayer field's value.
-func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
-	s.RequestPayer = &v
-	return s
-}
-
-// SetRestoreRequest sets the RestoreRequest field's value.
-func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
-	s.RestoreRequest = v
-	return s
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
-	s.VersionId = &v
-	return s
-}
-
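A sketch of initiating a Glacier restore with RestoreObjectInput and the RestoreRequest type defined just below. The day count and Tier are illustrative assumptions; svc comes from the first sketch.

func restoreExample(svc *s3.S3) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("archive/2018.tar"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(7), // how long the restored copy stays available
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	return err
}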
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
-type RestoreObjectOutput struct {
-	_ struct{} `type:"structure"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s RestoreObjectOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RestoreObjectOutput) GoString() string {
-	return s.String()
-}
-
-// SetRequestCharged sets the RequestCharged field's value.
-func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
-	s.RequestCharged = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
-type RestoreRequest struct {
-	_ struct{} `type:"structure"`
-
-	// Lifetime of the active copy in days
-	//
-	// Days is a required field
-	Days *int64 `type:"integer" required:"true"`
-
-	// Glacier-related parameters pertaining to this job.
-	GlacierJobParameters *GlacierJobParameters `type:"structure"`
-}
-
-// String returns the string representation
-func (s RestoreRequest) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RestoreRequest) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreRequest) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
-	if s.Days == nil {
-		invalidParams.Add(request.NewErrParamRequired("Days"))
-	}
-	if s.GlacierJobParameters != nil {
-		if err := s.GlacierJobParameters.Validate(); err != nil {
-			invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDays sets the Days field's value.
-func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
-	s.Days = &v
-	return s
-}
-
-// SetGlacierJobParameters sets the GlacierJobParameters field's value.
-func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
-	s.GlacierJobParameters = v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
-type RoutingRule struct {
-	_ struct{} `type:"structure"`
-
-	// A container for describing a condition that must be met for the specified
-	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
-	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
-	// redirect request to another host where you might process the error.
-	Condition *Condition `type:"structure"`
-
-	// Container for redirect information. You can redirect requests to another
-	// host, to another page, or with another protocol. In the event of an error,
-	// you can specify a different error code to return.
-	//
-	// Redirect is a required field
-	Redirect *Redirect `type:"structure" required:"true"`
-}
-
-// String returns the string representation
-func (s RoutingRule) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RoutingRule) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RoutingRule) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
-	if s.Redirect == nil {
-		invalidParams.Add(request.NewErrParamRequired("Redirect"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetCondition sets the Condition field's value.
-func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
-	s.Condition = v
-	return s
-}
-
-// SetRedirect sets the Redirect field's value.
-func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
-	s.Redirect = v
-	return s
-}
-
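A sketch of a conditional redirect: a RoutingRule pairs an optional Condition with the required Redirect, inside the same website configuration used earlier. Prefixes are placeholders; svc comes from the first sketch.

func routingRuleExample(svc *s3.S3) error {
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("my-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			RoutingRules: []*s3.RoutingRule{{
				// Rewrite requests under docs/ to documents/.
				Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
				Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
			}},
		},
	})
	return err
}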
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
-type Rule struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the days since the initiation of an Incomplete Multipart Upload
-	// that Lifecycle will wait before permanently removing all parts of the upload.
-	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
-
-	Expiration *LifecycleExpiration `type:"structure"`
-
-	// Unique identifier for the rule. The value cannot be longer than 255 characters.
-	ID *string `type:"string"`
-
-	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
-	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
-	// configuration action on a bucket that has versioning enabled (or suspended)
-	// to request that Amazon S3 delete noncurrent object versions at a specific
-	// period in the object's lifetime.
-	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
-
-	// Container for the transition rule that describes when noncurrent objects
-	// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
-	// versioning-enabled (or versioning is suspended), you can set this action
-	// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
-	// or GLACIER storage class at a specific period in the object's lifetime.
-	NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
-
-	// Prefix identifying one or more objects to which the rule applies.
-	//
-	// Prefix is a required field
-	Prefix *string `type:"string" required:"true"`
-
-	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
-	// is not currently being applied.
-	//
-	// Status is a required field
-	Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
-
-	Transition *Transition `type:"structure"`
-}
-
-// String returns the string representation
-func (s Rule) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Rule) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Rule) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Rule"}
-	if s.Prefix == nil {
-		invalidParams.Add(request.NewErrParamRequired("Prefix"))
-	}
-	if s.Status == nil {
-		invalidParams.Add(request.NewErrParamRequired("Status"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
-func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
-	s.AbortIncompleteMultipartUpload = v
-	return s
-}
-
-// SetExpiration sets the Expiration field's value.
-func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
-	s.Expiration = v
-	return s
-}
-
-// SetID sets the ID field's value.
-func (s *Rule) SetID(v string) *Rule {
-	s.ID = &v
-	return s
-}
-
-// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
-func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
-	s.NoncurrentVersionExpiration = v
-	return s
-}
-
-// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
-func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
-	s.NoncurrentVersionTransition = v
-	return s
-}
-
-// SetPrefix sets the Prefix field's value.
-func (s *Rule) SetPrefix(v string) *Rule {
-	s.Prefix = &v
-	return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *Rule) SetStatus(v string) *Rule {
-	s.Status = &v
-	return s
-}
-
-// SetTransition sets the Transition field's value.
-func (s *Rule) SetTransition(v *Transition) *Rule {
-	s.Transition = v
-	return s
-}
-
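A sketch of a lifecycle Rule in context: this legacy Rule type feeds the PutBucketLifecycle call via a LifecycleConfiguration, combining a transition (using the Transition type defined further down) with an expiration. Prefixes and day counts are placeholder assumptions; svc comes from the first sketch.

func lifecycleExample(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycle(&s3.PutBucketLifecycleInput{
		Bucket: aws.String("my-bucket"),
		LifecycleConfiguration: &s3.LifecycleConfiguration{
			Rules: []*s3.Rule{{
				Prefix: aws.String("tmp/"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				// Move to Glacier after 30 days, delete after a year.
				Transition: &s3.Transition{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassGlacier),
				},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	return err
}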
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
-type StorageClassAnalysis struct {
-	_ struct{} `type:"structure"`
-
-	// A container used to describe how data related to the storage class analysis
-	// should be exported.
-	DataExport *StorageClassAnalysisDataExport `type:"structure"`
-}
-
-// String returns the string representation
-func (s StorageClassAnalysis) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s StorageClassAnalysis) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StorageClassAnalysis) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
-	if s.DataExport != nil {
-		if err := s.DataExport.Validate(); err != nil {
-			invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDataExport sets the DataExport field's value.
-func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
-	s.DataExport = v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
-type StorageClassAnalysisDataExport struct {
-	_ struct{} `type:"structure"`
-
-	// The place to store the data for an analysis.
-	//
-	// Destination is a required field
-	Destination *AnalyticsExportDestination `type:"structure" required:"true"`
-
-	// The version of the output schema to use when exporting data. Must be V_1.
-	//
-	// OutputSchemaVersion is a required field
-	OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
-}
-
-// String returns the string representation
-func (s StorageClassAnalysisDataExport) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s StorageClassAnalysisDataExport) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StorageClassAnalysisDataExport) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
-	if s.Destination == nil {
-		invalidParams.Add(request.NewErrParamRequired("Destination"))
-	}
-	if s.OutputSchemaVersion == nil {
-		invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
-	}
-	if s.Destination != nil {
-		if err := s.Destination.Validate(); err != nil {
-			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDestination sets the Destination field's value.
-func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
-	s.Destination = v
-	return s
-}
-
-// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
-func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
-	s.OutputSchemaVersion = &v
-	return s
-}
-
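A sketch of where StorageClassAnalysisDataExport sits: it hangs off an analytics configuration written with PutBucketAnalyticsConfiguration. The IDs and the destination ARN are placeholder assumptions; svc comes from the first sketch.

func analyticsExportExample(svc *s3.S3) error {
	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("my-bucket"),
		Id:     aws.String("whole-bucket"),
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id: aws.String("whole-bucket"),
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::analytics-bucket"),
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
						},
					},
				},
			},
		},
	})
	return err
}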
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
-type Tag struct {
-	_ struct{} `type:"structure"`
-
-	// Name of the tag.
-	//
-	// Key is a required field
-	Key *string `min:"1" type:"string" required:"true"`
-
-	// Value of the tag.
-	//
-	// Value is a required field
-	Value *string `type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s Tag) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Tag) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Tag) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Tag"}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.Value == nil {
-		invalidParams.Add(request.NewErrParamRequired("Value"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetKey sets the Key field's value.
-func (s *Tag) SetKey(v string) *Tag {
-	s.Key = &v
-	return s
-}
-
-// SetValue sets the Value field's value.
-func (s *Tag) SetValue(v string) *Tag {
-	s.Value = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
-type Tagging struct {
-	_ struct{} `type:"structure"`
-
-	// TagSet is a required field
-	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
-}
-
-// String returns the string representation
-func (s Tagging) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Tagging) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Tagging) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Tagging"}
-	if s.TagSet == nil {
-		invalidParams.Add(request.NewErrParamRequired("TagSet"))
-	}
-	if s.TagSet != nil {
-		for i, v := range s.TagSet {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetTagSet sets the TagSet field's value.
-func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
-	s.TagSet = v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
-type TargetGrant struct {
-	_ struct{} `type:"structure"`
-
-	Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
-
-	// Logging permissions assigned to the Grantee for the bucket.
-	Permission *string `type:"string" enum:"BucketLogsPermission"`
-}
-
-// String returns the string representation
-func (s TargetGrant) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s TargetGrant) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TargetGrant) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
-	if s.Grantee != nil {
-		if err := s.Grantee.Validate(); err != nil {
-			invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetGrantee sets the Grantee field's value.
-func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
-	s.Grantee = v
-	return s
-}
-
-// SetPermission sets the Permission field's value.
-func (s *TargetGrant) SetPermission(v string) *TargetGrant {
-	s.Permission = &v
-	return s
-}
-
-// Container for specifying the configuration when you want Amazon S3 to publish
-// events to an Amazon Simple Notification Service (Amazon SNS) topic.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
-type TopicConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Events is a required field
-	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
-
-	// Container for object key name filtering rules. For information about key
-	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	Filter *NotificationConfigurationFilter `type:"structure"`
-
-	// Optional unique identifier for configurations in a notification configuration.
-	// If you don't provide one, Amazon S3 will assign an ID.
-	Id *string `type:"string"`
-
-	// Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
-	// events of specified type.
-	//
-	// TopicArn is a required field
-	TopicArn *string `locationName:"Topic" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s TopicConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s TopicConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TopicConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
-	if s.Events == nil {
-		invalidParams.Add(request.NewErrParamRequired("Events"))
-	}
-	if s.TopicArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("TopicArn"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetEvents sets the Events field's value.
-func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
-	s.Events = v
-	return s
-}
-
-// SetFilter sets the Filter field's value.
-func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
-	s.Filter = v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
-	s.Id = &v
-	return s
-}
-
-// SetTopicArn sets the TopicArn field's value.
-func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
-	s.TopicArn = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
-type TopicConfigurationDeprecated struct {
-	_ struct{} `type:"structure"`
-
-	// Bucket event for which to send notifications.
-	Event *string `deprecated:"true" type:"string" enum:"Event"`
-
-	Events []*string `locationName:"Event" type:"list" flattened:"true"`
-
-	// Optional unique identifier for configurations in a notification configuration.
-	// If you don't provide one, Amazon S3 will assign an ID.
-	Id *string `type:"string"`
-
-	// Amazon SNS topic to which Amazon S3 will publish a message to report the
-	// specified events for the bucket.
-	Topic *string `type:"string"`
-}
-
-// String returns the string representation
-func (s TopicConfigurationDeprecated) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s TopicConfigurationDeprecated) GoString() string {
-	return s.String()
-}
-
-// SetEvent sets the Event field's value.
-func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
-	s.Event = &v
-	return s
-}
-
-// SetEvents sets the Events field's value.
-func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
-	s.Events = v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
-	s.Id = &v
-	return s
-}
-
-// SetTopic sets the Topic field's value.
-func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
-	s.Topic = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
-type Transition struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates at what date the object is to be moved or deleted. Should be in
-	// GMT ISO 8601 Format.
-	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-	// Indicates the lifetime, in days, of the objects that are subject to the rule.
-	// The value must be a non-zero positive integer.
-	Days *int64 `type:"integer"`
-
-	// The class of storage used to store the object.
-	StorageClass *string `type:"string" enum:"TransitionStorageClass"`
-}
-
-// String returns the string representation
-func (s Transition) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Transition) GoString() string {
-	return s.String()
-}
-
-// SetDate sets the Date field's value.
-func (s *Transition) SetDate(v time.Time) *Transition {
-	s.Date = &v
-	return s
-}
-
-// SetDays sets the Days field's value.
-func (s *Transition) SetDays(v int64) *Transition {
-	s.Days = &v
-	return s
-}
-
-// SetStorageClass sets the StorageClass field's value.
-func (s *Transition) SetStorageClass(v string) *Transition {
-	s.StorageClass = &v
-	return s
-}
-
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
-type UploadPartCopyInput struct {
-	_ struct{} `type:"structure"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The name of the source bucket and key name of the source object, separated
-	// by a slash (/). Must be URL-encoded.
-	//
-	// CopySource is a required field
-	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
-
-	// Copies the object if its entity tag (ETag) matches the specified tag.
-	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
-
-	// Copies the object if it has been modified since the specified time.
-	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Copies the object if its entity tag (ETag) is different than the specified
-	// ETag.
-	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
-
-	// Copies the object if it hasn't been modified since the specified time.
-	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// The range of bytes to copy from the source object. The range value must use
-	// the form bytes=first-last, where the first and last are the zero-based byte
-	// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
-	// first ten bytes of the source. You can copy a range only if the source object
-	// is greater than 5 GB.
-	CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
-
-	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
-	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
-	// the source object. The encryption key provided in this header must be one
-	// that was used when the source object was created.
-	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Part number of part being copied. This is a positive integer between 1 and
-	// 10,000.
-	//
-	// PartNumber is a required field
-	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being copied. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UploadPartCopyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartCopyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.CopySource == nil { - invalidParams.Add(request.NewErrParamRequired("CopySource")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { - s.Bucket = &v - return s -} - -// SetCopySource sets the CopySource field's value. -func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { - s.CopySource = &v - return s -} - -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. -func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { - s.CopySourceIfMatch = &v - return s -} - -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. -func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. 
-func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceRange sets the CopySourceRange field's value. -func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { - s.CopySourceRange = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKey = &v - return s -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput -type UploadPartCopyOutput struct { - _ struct{} `type:"structure" payload:"CopyPartResult"` - - CopyPartResult *CopyPartResult `type:"structure"` - - // The version of the source object that was copied, if you have enabled versioning - // on the source bucket. - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. 
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s UploadPartCopyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyOutput) GoString() string { - return s.String() -} - -// SetCopyPartResult sets the CopyPartResult field's value. -func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { - s.CopyPartResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { - s.CopySourceVersionId = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { - s.ServerSideEncryption = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest -type UploadPartInput struct { - _ struct{} `type:"structure" payload:"Body"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // Name of the bucket to which the multipart upload was initiated. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being uploaded. This is a positive integer between 1 - // and 10,000. 
- // - // PartNumber is a required field - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being uploaded. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UploadPartInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBody sets the Body field's value. -func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { - s.Body = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { - s.Bucket = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { - s.ContentLength = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartInput) SetKey(v string) *UploadPartInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. 
-func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { - s.SSECustomerKey = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { - s.UploadId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput -type UploadPartOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s UploadPartOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartOutput) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { - s.ETag = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
-func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { - s.ServerSideEncryption = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration -type VersioningConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s VersioningConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VersioningConfiguration) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { - s.Status = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration -type WebsiteConfiguration struct { - _ struct{} `type:"structure"` - - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s WebsiteConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WebsiteConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WebsiteConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} - if s.ErrorDocument != nil { - if err := s.ErrorDocument.Validate(); err != nil { - invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) - } - } - if s.IndexDocument != nil { - if err := s.IndexDocument.Validate(); err != nil { - invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) - } - } - if s.RedirectAllRequestsTo != nil { - if err := s.RedirectAllRequestsTo.Validate(); err != nil { - invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) - } - } - if s.RoutingRules != nil { - for i, v := range s.RoutingRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetErrorDocument sets the ErrorDocument field's value. 
-func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { - s.RoutingRules = v - return s -} - -const ( - // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value - AnalyticsS3ExportFileFormatCsv = "CSV" -) - -const ( - // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value - BucketAccelerateStatusEnabled = "Enabled" - - // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value - BucketAccelerateStatusSuspended = "Suspended" -) - -const ( - // BucketCannedACLPrivate is a BucketCannedACL enum value - BucketCannedACLPrivate = "private" - - // BucketCannedACLPublicRead is a BucketCannedACL enum value - BucketCannedACLPublicRead = "public-read" - - // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value - BucketCannedACLPublicReadWrite = "public-read-write" - - // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value - BucketCannedACLAuthenticatedRead = "authenticated-read" -) - -const ( - // BucketLocationConstraintEu is a BucketLocationConstraint enum value - BucketLocationConstraintEu = "EU" - - // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest1 = "eu-west-1" - - // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest1 = "us-west-1" - - // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest2 = "us-west-2" - - // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSouth1 = "ap-south-1" - - // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - - // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - - // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - - // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintSaEast1 = "sa-east-1" - - // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value - BucketLocationConstraintCnNorth1 = "cn-north-1" - - // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuCentral1 = "eu-central-1" -) - -const ( - // BucketLogsPermissionFullControl is a BucketLogsPermission enum value - BucketLogsPermissionFullControl = "FULL_CONTROL" - - // BucketLogsPermissionRead is a BucketLogsPermission enum value - BucketLogsPermissionRead = "READ" - - // BucketLogsPermissionWrite is a BucketLogsPermission enum value - BucketLogsPermissionWrite = "WRITE" -) - -const ( - // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value - BucketVersioningStatusEnabled = "Enabled" - - // 
BucketVersioningStatusSuspended is a BucketVersioningStatus enum value - BucketVersioningStatusSuspended = "Suspended" -) - -// Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters -// with an ASCII value from 0 to 10. For characters that are not supported in -// XML 1.0, you can add this parameter to request that Amazon S3 encode the -// keys in the response. -const ( - // EncodingTypeUrl is a EncodingType enum value - EncodingTypeUrl = "url" -) - -// Bucket event for which to send notifications. -const ( - // EventS3ReducedRedundancyLostObject is a Event enum value - EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - - // EventS3ObjectCreated is a Event enum value - EventS3ObjectCreated = "s3:ObjectCreated:*" - - // EventS3ObjectCreatedPut is a Event enum value - EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - - // EventS3ObjectCreatedPost is a Event enum value - EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - - // EventS3ObjectCreatedCopy is a Event enum value - EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - - // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value - EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - - // EventS3ObjectRemoved is a Event enum value - EventS3ObjectRemoved = "s3:ObjectRemoved:*" - - // EventS3ObjectRemovedDelete is a Event enum value - EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - - // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value - EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" -) - -const ( - // ExpirationStatusEnabled is a ExpirationStatus enum value - ExpirationStatusEnabled = "Enabled" - - // ExpirationStatusDisabled is a ExpirationStatus enum value - ExpirationStatusDisabled = "Disabled" -) - -const ( - // FilterRuleNamePrefix is a FilterRuleName enum value - FilterRuleNamePrefix = "prefix" - - // FilterRuleNameSuffix is a FilterRuleName enum value - FilterRuleNameSuffix = "suffix" -) - -const ( - // InventoryFormatCsv is a InventoryFormat enum value - InventoryFormatCsv = "CSV" -) - -const ( - // InventoryFrequencyDaily is a InventoryFrequency enum value - InventoryFrequencyDaily = "Daily" - - // InventoryFrequencyWeekly is a InventoryFrequency enum value - InventoryFrequencyWeekly = "Weekly" -) - -const ( - // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsAll = "All" - - // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsCurrent = "Current" -) - -const ( - // InventoryOptionalFieldSize is a InventoryOptionalField enum value - InventoryOptionalFieldSize = "Size" - - // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value - InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" - - // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value - InventoryOptionalFieldStorageClass = "StorageClass" - - // InventoryOptionalFieldEtag is a InventoryOptionalField enum value - InventoryOptionalFieldEtag = "ETag" - - // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value - InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" - - // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value - 
InventoryOptionalFieldReplicationStatus = "ReplicationStatus" -) - -const ( - // MFADeleteEnabled is a MFADelete enum value - MFADeleteEnabled = "Enabled" - - // MFADeleteDisabled is a MFADelete enum value - MFADeleteDisabled = "Disabled" -) - -const ( - // MFADeleteStatusEnabled is a MFADeleteStatus enum value - MFADeleteStatusEnabled = "Enabled" - - // MFADeleteStatusDisabled is a MFADeleteStatus enum value - MFADeleteStatusDisabled = "Disabled" -) - -const ( - // MetadataDirectiveCopy is a MetadataDirective enum value - MetadataDirectiveCopy = "COPY" - - // MetadataDirectiveReplace is a MetadataDirective enum value - MetadataDirectiveReplace = "REPLACE" -) - -const ( - // ObjectCannedACLPrivate is a ObjectCannedACL enum value - ObjectCannedACLPrivate = "private" - - // ObjectCannedACLPublicRead is a ObjectCannedACL enum value - ObjectCannedACLPublicRead = "public-read" - - // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value - ObjectCannedACLPublicReadWrite = "public-read-write" - - // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value - ObjectCannedACLAuthenticatedRead = "authenticated-read" - - // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value - ObjectCannedACLAwsExecRead = "aws-exec-read" - - // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - - // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" -) - -const ( - // ObjectStorageClassStandard is a ObjectStorageClass enum value - ObjectStorageClassStandard = "STANDARD" - - // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value - ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // ObjectStorageClassGlacier is a ObjectStorageClass enum value - ObjectStorageClassGlacier = "GLACIER" -) - -const ( - // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value - ObjectVersionStorageClassStandard = "STANDARD" -) - -const ( - // PayerRequester is a Payer enum value - PayerRequester = "Requester" - - // PayerBucketOwner is a Payer enum value - PayerBucketOwner = "BucketOwner" -) - -const ( - // PermissionFullControl is a Permission enum value - PermissionFullControl = "FULL_CONTROL" - - // PermissionWrite is a Permission enum value - PermissionWrite = "WRITE" - - // PermissionWriteAcp is a Permission enum value - PermissionWriteAcp = "WRITE_ACP" - - // PermissionRead is a Permission enum value - PermissionRead = "READ" - - // PermissionReadAcp is a Permission enum value - PermissionReadAcp = "READ_ACP" -) - -const ( - // ProtocolHttp is a Protocol enum value - ProtocolHttp = "http" - - // ProtocolHttps is a Protocol enum value - ProtocolHttps = "https" -) - -const ( - // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusEnabled = "Enabled" - - // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusDisabled = "Disabled" -) - -const ( - // ReplicationStatusComplete is a ReplicationStatus enum value - ReplicationStatusComplete = "COMPLETE" - - // ReplicationStatusPending is a ReplicationStatus enum value - ReplicationStatusPending = "PENDING" - - // ReplicationStatusFailed is a ReplicationStatus enum value - ReplicationStatusFailed = "FAILED" - - // ReplicationStatusReplica is a ReplicationStatus enum value - ReplicationStatusReplica = "REPLICA" -) - -// If present, indicates that the requester was successfully charged for 
the -// request. -const ( - // RequestChargedRequester is a RequestCharged enum value - RequestChargedRequester = "requester" -) - -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html -const ( - // RequestPayerRequester is a RequestPayer enum value - RequestPayerRequester = "requester" -) - -const ( - // ServerSideEncryptionAes256 is a ServerSideEncryption enum value - ServerSideEncryptionAes256 = "AES256" - - // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value - ServerSideEncryptionAwsKms = "aws:kms" -) - -const ( - // StorageClassStandard is a StorageClass enum value - StorageClassStandard = "STANDARD" - - // StorageClassReducedRedundancy is a StorageClass enum value - StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // StorageClassStandardIa is a StorageClass enum value - StorageClassStandardIa = "STANDARD_IA" -) - -const ( - // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value - StorageClassAnalysisSchemaVersionV1 = "V_1" -) - -const ( - // TaggingDirectiveCopy is a TaggingDirective enum value - TaggingDirectiveCopy = "COPY" - - // TaggingDirectiveReplace is a TaggingDirective enum value - TaggingDirectiveReplace = "REPLACE" -) - -const ( - // TierStandard is a Tier enum value - TierStandard = "Standard" - - // TierBulk is a Tier enum value - TierBulk = "Bulk" - - // TierExpedited is a Tier enum value - TierExpedited = "Expedited" -) - -const ( - // TransitionStorageClassGlacier is a TransitionStorageClass enum value - TransitionStorageClassGlacier = "GLACIER" - - // TransitionStorageClassStandardIa is a TransitionStorageClass enum value - TransitionStorageClassStandardIa = "STANDARD_IA" -) - -const ( - // TypeCanonicalUser is a Type enum value - TypeCanonicalUser = "CanonicalUser" - - // TypeAmazonCustomerByEmail is a Type enum value - TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - - // TypeGroup is a Type enum value - TypeGroup = "Group" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go deleted file mode 100644 index bc68a46..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ /dev/null @@ -1,106 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) - -// NormalizeBucketLocation is a utility function which will update the -// passed in value to always be a region ID. Generally this would be used -// with GetBucketLocation API operation. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -func NormalizeBucketLocation(loc string) string { - switch loc { - case "": - loc = "us-east-1" - case "EU": - loc = "eu-west-1" - } - - return loc -} - -// NormalizeBucketLocationHandler is a request handler which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. 
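-//
-// (Editor's illustration, not part of the original source: the plain
-// NormalizeBucketLocation function above can be called directly, e.g.
-//
-//	NormalizeBucketLocation("")          // "us-east-1"
-//	NormalizeBucketLocation("EU")        // "eu-west-1"
-//	NormalizeBucketLocation("eu-west-1") // "eu-west-1", unchanged
-// )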
-// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -// -// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }) -// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -// err := req.Send() -var NormalizeBucketLocationHandler = request.NamedHandler{ - Name: "awssdk.s3.NormalizeBucketLocation", - Fn: func(req *request.Request) { - if req.Error != nil { - return - } - - out := req.Data.(*GetBucketLocationOutput) - loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) - out.LocationConstraint = aws.String(loc) - }, -} - -// WithNormalizeBucketLocation is a request option which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -// -// result, err := svc.GetBucketLocationWithContext(ctx, -// &s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }, -// s3.WithNormalizeBucketLocation, -// ) -func WithNormalizeBucketLocation(r *request.Request) { - r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -} - -func buildGetBucketLocation(r *request.Request) { - if r.DataFilled() { - out := r.Data.(*GetBucketLocationOutput) - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) - return - } - - match := reBucketLocation.FindSubmatch(b) - if len(match) > 1 { - loc := string(match[1]) - out.LocationConstraint = aws.String(loc) - } - } -} - -func populateLocationConstraint(r *request.Request) { - if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { - in := r.Params.(*CreateBucketInput) - if in.CreateBucketConfiguration == nil { - r.Params = awsutil.CopyOf(r.Params) - in = r.Params.(*CreateBucketInput) - in.CreateBucketConfiguration = &CreateBucketConfiguration{ - LocationConstraint: r.Config.Region, - } - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. 
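- // Editor's sketch, not in the original source: Content-MD5 is the
- // base64 encoding of the raw 16-byte digest (RFC 1864), not of its
- // hex form. For a body of "hello" the header value would be:
- //
- //	sum := md5.Sum([]byte("hello"))
- //	base64.StdEncoding.EncodeToString(sum[:]) // "XUFAKrxLKna5cZ2REBfFkg=="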
- sum := h.Sum(nil)
- sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
- base64.StdEncoding.Encode(sum64, sum)
- r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
deleted file mode 100644
index 8463347..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package s3
-
-import (
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-func init() {
- initClient = defaultInitClientFn
- initRequest = defaultInitRequestFn
-}
-
-func defaultInitClientFn(c *client.Client) {
- // Support building custom endpoints based on config
- c.Handlers.Build.PushFront(updateEndpointForS3Config)
-
- // Require SSL when using SSE keys
- c.Handlers.Validate.PushBack(validateSSERequiresSSL)
- c.Handlers.Build.PushBack(computeSSEKeys)
-
- // S3 uses custom error unmarshaling logic
- c.Handlers.UnmarshalError.Clear()
- c.Handlers.UnmarshalError.PushBack(unmarshalError)
-}
-
-func defaultInitRequestFn(r *request.Request) {
- // Add request handlers for specific platforms.
- // e.g. 100-continue support for PUT requests using Go 1.6
- platformRequestHandlers(r)
-
- switch r.Operation.Name {
- case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
- opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
- opPutBucketReplication:
- // These S3 operations require Content-MD5 to be set
- r.Handlers.Build.PushBack(contentMD5)
- case opGetBucketLocation:
- // GetBucketLocation has custom parsing logic
- r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
- case opCreateBucket:
- // Auto-populate LocationConstraint with current region
- r.Handlers.Validate.PushFront(populateLocationConstraint)
- case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
- r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
deleted file mode 100644
index f045fd0..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package s3 provides the client and types for making API
-// requests to Amazon Simple Storage Service.
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
-//
-// See s3 package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
-//
-// Using the Client
-//
-// To use the client for Amazon Simple Storage Service you will first need
-// to create a new instance of it.
-//
-// When creating a client for an AWS service you'll first need to have a Session
-// already created. The Session provides configuration that can be shared
-// between multiple service clients. Additional configuration can be applied to
-// the Session and service's client when they are constructed. The aws package's
-// Config type contains several fields such as Region for the AWS Region the
-// client should make API requests to. The optional Config value can be provided
-// as the variadic argument for Sessions and client creation.
-//
-// Once the service's client is created you can use it to make API requests to
-// the AWS service. These clients are safe to use concurrently.
-//
-// // Create a session to share configuration, and load external configuration.
-// sess := session.Must(session.NewSession())
-//
-// // Create the service's client with the session.
-// svc := s3.New(sess)
-//
-// See the SDK's documentation for more information on how to use service clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws package's Config type for more information on configuration options.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the Amazon Simple Storage Service client S3 for more
-// information on creating the service's client.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
-//
-// Once the client is created you can make an API request to the service.
-// Each API method takes an input parameter, and returns the service response
-// and an error.
-//
-// The API method's documentation lists which error codes the operation can
-// return, if the service models the API operation's errors. These
-// errors will also be available as const strings prefixed with "ErrCode".
-//
-// result, err := svc.AbortMultipartUpload(params)
-// if err != nil {
-// // Cast err to awserr.Error to handle specific error codes.
-// aerr, ok := err.(awserr.Error)
-// if ok && aerr.Code() == {
-// // Specific error code handling
-// }
-// return err
-// }
-//
-// fmt.Println("AbortMultipartUpload result:")
-// fmt.Println(result)
-//
-// Using the Client with Context
-//
-// The service's client also provides methods to make API requests with a Context
-// value. This allows you to control the timeout and cancellation of pending
-// requests. These methods also take request Options as a variadic parameter to apply
-// additional configuration to the API request.
-//
-// ctx := context.Background()
-//
-// result, err := svc.AbortMultipartUploadWithContext(ctx, params)
-//
-// See the request package documentation for more information on using the Context pattern
-// with the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
-package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
deleted file mode 100644
index b794a63..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Upload Managers
-//
-// The s3manager package's Uploader provides concurrent upload of content to S3
-// by taking advantage of S3's Multipart APIs. The Uploader supports both
-// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker
-// for optimizations if the Body satisfies that type. Once the Uploader instance
-// is created you can call Upload concurrently from multiple goroutines safely.
-//
-// // The session the S3 Uploader will use
-// sess := session.Must(session.NewSession())
-//
-// // Create an uploader with the session and default options
-// uploader := s3manager.NewUploader(sess)
-//
-// f, err := os.Open(filename)
-// if err != nil {
-// return fmt.Errorf("failed to open file %q, %v", filename, err)
-// }
-//
-// // Upload the file to S3.
-// result, err := uploader.Upload(&s3manager.UploadInput{
-// Bucket: aws.String(myBucket),
-// Key: aws.String(myString),
-// Body: f,
-// })
-// if err != nil {
-// return fmt.Errorf("failed to upload file, %v", err)
-// }
-// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
-//
-// See the s3manager package's Uploader type documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
-//
-// Download Manager
-//
-// The s3manager package's Downloader provides concurrent downloading of Objects
-// from S3. The Downloader will write S3 Object content with an io.WriterAt.
-// Once the Downloader instance is created you can call Download concurrently from
-// multiple goroutines safely.
-//
-// // The session the S3 Downloader will use
-// sess := session.Must(session.NewSession())
-//
-// // Create a downloader with the session and default options
-// downloader := s3manager.NewDownloader(sess)
-//
-// // Create a file to write the S3 Object contents to.
-// f, err := os.Create(filename)
-// if err != nil {
-// return fmt.Errorf("failed to create file %q, %v", filename, err)
-// }
-//
-// // Write the contents of S3 Object to the file
-// n, err := downloader.Download(f, &s3.GetObjectInput{
-// Bucket: aws.String(myBucket),
-// Key: aws.String(myString),
-// })
-// if err != nil {
-// return fmt.Errorf("failed to download file, %v", err)
-// }
-// fmt.Printf("file downloaded, %d bytes\n", n)
-//
-// See the s3manager package's Downloader type documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
-//
-// Get Bucket Region
-//
-// GetBucketRegion will attempt to get the region for a bucket using a region
-// hint to determine which AWS partition to perform the query on. Use this utility
-// to determine the region a bucket is in.
-//
-// sess := session.Must(session.NewSession())
-//
-// bucket := "my-bucket"
-// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
-// if err != nil {
-// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
-// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
-// }
-// return err
-// }
-// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
-//
-// See the s3manager package's GetBucketRegion function documentation for more information
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
-//
-// S3 Crypto Client
-//
-// The s3crypto package provides the tools to upload and download encrypted
-// content from S3. The Encryption and Decryption clients can be used concurrently
-// once the client is created.
-//
-// sess := session.Must(session.NewSession())
-//
-// // Create the decryption client.
-// svc := s3crypto.NewDecryptionClient(sess)
-//
-// // The object will be downloaded from S3 and decrypted locally. The metadata
-// // about the object's encryption will instruct the decryption client how to
-// // decrypt the content of the object. By default KMS is used for keys.
-// result, err := svc.GetObject(&s3.GetObjectInput {
-// Bucket: aws.String(myBucket),
-// Key: aws.String(myKey),
-// })
-//
-// See the s3crypto package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
-//
-package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
deleted file mode 100644
index 931cb17..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package s3
-
-const (
-
- // ErrCodeBucketAlreadyExists for service response error code
- // "BucketAlreadyExists".
- //
- // The requested bucket name is not available. The bucket namespace is shared
- // by all users of the system. Please select a different name and try again.
- ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
-
- // ErrCodeBucketAlreadyOwnedByYou for service response error code
- // "BucketAlreadyOwnedByYou".
- ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
-
- // ErrCodeNoSuchBucket for service response error code
- // "NoSuchBucket".
- //
- // The specified bucket does not exist.
- ErrCodeNoSuchBucket = "NoSuchBucket"
-
- // ErrCodeNoSuchKey for service response error code
- // "NoSuchKey".
- //
- // The specified key does not exist.
- ErrCodeNoSuchKey = "NoSuchKey"
-
- // ErrCodeNoSuchUpload for service response error code
- // "NoSuchUpload".
- //
- // The specified multipart upload does not exist.
- ErrCodeNoSuchUpload = "NoSuchUpload"
-
- // ErrCodeObjectAlreadyInActiveTierError for service response error code
- // "ObjectAlreadyInActiveTierError".
- //
- // This operation is not allowed against this storage tier.
- ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
-
- // ErrCodeObjectNotInActiveTierError for service response error code
- // "ObjectNotInActiveTierError".
- //
- // The source object of the COPY operation is not in the active tier and is
- // only stored in Amazon Glacier.
- ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
-)
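(Editor's aside, a hedged sketch of how the removed ErrCode constants above were typically consumed by callers of this vendored SDK; svc, the bucket name, and the key are assumed placeholders.)

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // assumed name
		Key:    aws.String("missing-key"),    // assumed name
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey:
			// absent bucket or key; treat as "not found" rather than a hard failure
		}
	}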
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
deleted file mode 100644
index ec3ffe4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package s3
-
-import (
- "fmt"
- "net/url"
- "regexp"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// an operationBlacklist is a list of operation names that a
-// request handler should not be executed with.
-type operationBlacklist []string
-
-// Continue will return true if the Request's operation name is not
-// in the blacklist. False otherwise.
-func (b operationBlacklist) Continue(r *request.Request) bool {
- for i := 0; i < len(b); i++ {
- if b[i] == r.Operation.Name {
- return false
- }
- }
- return true
-}
-
-var accelerateOpBlacklist = operationBlacklist{
- opListBuckets, opCreateBucket, opDeleteBucket,
-}
-
-// Request handler to automatically add the bucket name to the endpoint domain
-// if possible. This style of bucket is valid for all bucket names which are
-// DNS compatible and do not contain "."
-func updateEndpointForS3Config(r *request.Request) {
- forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
- accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
-
- if accelerate && accelerateOpBlacklist.Continue(r) {
- if forceHostStyle {
- if r.Config.Logger != nil {
- r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
- }
- }
- updateEndpointForAccelerate(r)
- } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
- updateEndpointForHostStyle(r)
- }
-}
-
-func updateEndpointForHostStyle(r *request.Request) {
- bucket, ok := bucketNameFromReqParams(r.Params)
- if !ok {
- // Ignore operation requests if the bucket name was not provided;
- // if this is an input validation error the validation handler
- // will report it.
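- // (Editor's illustration, not in the original source: when a DNS-compatible
- // bucket name is present, the handlers in this file rewrite the path-style
- // URL into the virtual-hosted style, roughly:
- //
- //	u, _ := url.Parse("https://s3.us-west-2.amazonaws.com/{Bucket}/key")
- //	moveBucketToHost(u, "my-bucket")
- //	// u.String() == "https://my-bucket.s3.us-west-2.amazonaws.com/key"
- // )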
- return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { - // bucket name must be valid to put into the host - return - } - - moveBucketToHost(r.HTTPRequest.URL, bucket) -} - -var ( - accelElem = []byte("s3-accelerate.dualstack.") -) - -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { - r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), - nil) - return - } - - parts := strings.Split(r.HTTPRequest.URL.Host, ".") - if len(parts) < 3 { - r.Error = awserr.New("InvalidParameterExecption", - fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", - r.HTTPRequest.URL.Host), nil) - return - } - - if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { - parts[0] = "s3-accelerate" - } - for i := 1; i+1 < len(parts); i++ { - if parts[i] == aws.StringValue(r.Config.Region) { - parts = append(parts[:i], parts[i+1:]...) - break - } - } - - r.HTTPRequest.URL.Host = strings.Join(parts, ".") - - moveBucketToHost(r.HTTPRequest.URL, bucket) -} - -// Attempts to retrieve the bucket name from the request input parameters. -// If no bucket is found, or the field is empty "", false will be returned. -func bucketNameFromReqParams(params interface{}) (string, bool) { - b, _ := awsutil.ValuesAtPath(params, "Bucket") - if len(b) == 0 { - return "", false - } - - if bucket, ok := b[0].(*string); ok { - if bucketStr := aws.StringValue(bucket); bucketStr != "" { - return bucketStr, true - } - } - - return "", false -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if S3ForcePathStyle is -// explicitly set or if the bucket is not DNS compatible. -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) -var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - return reDomain.MatchString(bucket) && - !reIPAddress.MatchString(bucket) && - !strings.Contains(bucket, "..") -} - -// moveBucketToHost moves the bucket name from the URI path to URL host. -func moveBucketToHost(u *url.URL, bucket string) { - u.Host = bucket + "." 
+ u.Host
- u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
- if u.Path == "" {
- u.Path = "/"
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
deleted file mode 100644
index 8e6f330..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !go1.6
-
-package s3
-
-import "github.com/aws/aws-sdk-go/aws/request"
-
-func platformRequestHandlers(r *request.Request) {
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
deleted file mode 100644
index 14d05f7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build go1.6
-
-package s3
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-func platformRequestHandlers(r *request.Request) {
- if r.Operation.HTTPMethod == "PUT" {
- // 100-Continue should only be used on put requests.
- r.Handlers.Sign.PushBack(add100Continue)
- }
-}
-
-func add100Continue(r *request.Request) {
- if aws.BoolValue(r.Config.S3Disable100Continue) {
- return
- }
- if r.HTTPRequest.ContentLength < 1024*1024*2 {
- // Ignore requests smaller than 2MB. This helps prevent delaying
- // requests unnecessarily.
- return
- }
-
- r.HTTPRequest.Header.Set("Expect", "100-Continue")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
deleted file mode 100644
index 614e477..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package s3
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol/restxml"
-)
-
-// S3 provides the API operation methods for making requests to
-// Amazon Simple Storage Service. See this package's package overview docs
-// for details on the service.
-//
-// S3 methods are safe to use concurrently. It is not safe to
-// mutate any of the struct's properties though.
-type S3 struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "s3" // Service endpoint prefix API calls are made to.
- EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
-)
-
-// New creates a new instance of the S3 client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-// // Create a S3 client from just a session.
-// svc := s3.New(mySession)
-//
-// // Create a S3 client with additional configuration
-// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
- c := p.ClientConfig(EndpointsID, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { - svc := &S3{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2006-03-01", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a S3 operation and runs any -// custom request initialization. -func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go deleted file mode 100644 index 268ea2f..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ /dev/null @@ -1,44 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) - -func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme != "https" { - p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") - if len(p) > 0 { - r.Error = errSSERequiresSSL - } - } -} - -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", - } - - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go deleted file mode 100644 index 5a78fd3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ /dev/null @@ -1,35 +0,0 @@ -package s3 - -import ( - "bytes" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -func copyMultipartStatusOKUnmarhsalError(r *request.Request) { - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) - return - } - body := bytes.NewReader(b) - r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, 0) - - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. 
- return
- }
-
- unmarshalError(r)
- if err, ok := r.Error.(awserr.Error); ok && err != nil {
- if err.Code() == "SerializationError" {
- r.Error = nil
- return
- }
- r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
deleted file mode 100644
index bcca862..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package s3
-
-import (
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-type xmlErrorResponse struct {
- XMLName xml.Name `xml:"Error"`
- Code string `xml:"Code"`
- Message string `xml:"Message"`
-}
-
-func unmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
-
- hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
-
- // Bucket exists in a different region, and request needs
- // to be made to the correct region.
- if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
- r.Error = requestFailure{
- RequestFailure: awserr.NewRequestFailure(
- awserr.New("BucketRegionError",
- fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
- aws.StringValue(r.Config.Region)),
- nil),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- ),
- hostID: hostID,
- }
- return
- }
-
- var errCode, errMsg string
-
- // Attempt to parse error from body if it is known
- resp := &xmlErrorResponse{}
- err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
- if err != nil && err != io.EOF {
- errCode = "SerializationError"
- errMsg = "failed to decode S3 XML error response"
- } else {
- errCode = resp.Code
- errMsg = resp.Message
- err = nil
- }
-
- // Fallback to status code converted to message if still no error code
- if len(errCode) == 0 {
- statusText := http.StatusText(r.HTTPResponse.StatusCode)
- errCode = strings.Replace(statusText, " ", "", -1)
- errMsg = statusText
- }
-
- r.Error = requestFailure{
- RequestFailure: awserr.NewRequestFailure(
- awserr.New(errCode, errMsg, err),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- ),
- hostID: hostID,
- }
-}
-
-// A RequestFailure provides access to the S3 Request ID and Host ID values
-// returned from API operation errors. Getting the error as a string will
-// return the formatted error with the same information as awserr.RequestFailure,
-// while also adding the HostID value from the response.
-type RequestFailure interface {
- awserr.RequestFailure
-
- // HostID is the S3 Host ID needed for debugging and contacting support
- HostID() string
-}
-
-type requestFailure struct {
- awserr.RequestFailure
-
- hostID string
-}
-
-func (r requestFailure) Error() string {
- extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
- r.StatusCode(), r.RequestID(), r.hostID)
- return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
-}
-func (r requestFailure) String() string {
- return r.Error()
-}
-func (r requestFailure) HostID() string {
- return r.hostID
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
deleted file mode 100644
index cccfa8c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
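Just before the waiters file above, unmarshal_error.go defines the s3.RequestFailure interface, which adds the S3 Host ID on top of the usual awserr fields; after this PR the same interface simply comes from the module-managed SDK instead of vendor/. A minimal sketch of how a caller inspects it (the bucket name and region are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := s3.New(sess)

    _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("my-bucket")})
    if rf, ok := err.(s3.RequestFailure); ok {
        // HostID is the extra accessor the interface above adds to awserr.RequestFailure.
        fmt.Println(rf.Code(), rf.RequestID(), rf.HostID())
    }
}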
-
-package s3
-
-import (
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// WaitUntilBucketExists uses the Amazon S3 API operation
-// HeadBucket to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window an error will
-// be returned.
-func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
- return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
-// with support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilBucketExists",
- MaxAttempts: 20,
- Delay: request.ConstantWaiterDelay(5 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 200,
- },
- {
- State: request.SuccessWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 301,
- },
- {
- State: request.SuccessWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 403,
- },
- {
- State: request.RetryWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 404,
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *HeadBucketInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.HeadBucketRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
-
-// WaitUntilBucketNotExists uses the Amazon S3 API operation
-// HeadBucket to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window an error will
-// be returned.
-func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
- return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
-// with support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilBucketNotExists",
- MaxAttempts: 20,
- Delay: request.ConstantWaiterDelay(5 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 404,
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *HeadBucketInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.HeadBucketRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
-
-// WaitUntilObjectExists uses the Amazon S3 API operation
-// HeadObject to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window an error will
-// be returned.
-func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
- return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
-// with support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilObjectExists",
- MaxAttempts: 20,
- Delay: request.ConstantWaiterDelay(5 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 200,
- },
- {
- State: request.RetryWaiterState,
- Matcher: request.StatusWaiterMatch,
- Expected: 404,
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *HeadObjectInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.HeadObjectRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
-
-// WaitUntilObjectNotExists uses the Amazon S3 API operation
-// HeadObject to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window an error will
-// be returned.
-func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
- return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
-// with support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilObjectNotExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadObjectInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadObjectRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go deleted file mode 100644 index e5c105f..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ /dev/null @@ -1,2365 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRole operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See AssumeRole for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRole method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) that you can use to access -// AWS resources that you might not normally have access to. Typically, you -// use AssumeRole for cross-account access or federation. For a comparison of -// AssumeRole with the other APIs that produce temporary credentials, see Requesting -// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. 
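The waiters.go file removed above drives each waiter through a request.Waiter with HeadBucket/HeadObject status acceptors; the same API remains available from the module-managed SDK after this change. A minimal usage sketch (bucket name and region are hypothetical; the max-attempts override is only there to show a WaiterOption replacing the default of 20 attempts at a 5-second delay):

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

    // Polls HeadBucket until 200/301/403 (success); 404 triggers a retry,
    // mirroring the acceptor table in the deleted waiters.go.
    err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
        &s3.HeadBucketInput{Bucket: aws.String("my-bucket")},
        request.WithWaiterMaxAttempts(40))
    if err != nil {
        log.Fatal(err)
    }
}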
-// -// Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use credentials for an IAM user or an IAM role -// to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) -// in the IAM User Guide. -// -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your corporate network, you don't have to recreate user identities -// in AWS in order to grant those user identities access to AWS. Instead, after -// a user has been authenticated, you call AssumeRole (and specify the role -// with the appropriate permissions) to get temporary security credentials for -// that user. With those temporary security credentials, you construct a sign-in -// URL that users can use to access the console. For more information, see Common -// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) -// in the IAM User Guide. -// -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: you cannot call -// the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. 
-//
-// The user who wants to access the role must also have permissions delegated
-// from the role's administrator. If the user is in a different account than
-// the role, then the user's administrator must attach a policy that allows
-// the user to call AssumeRole on the ARN of the role in the other account.
-// If the user is in the same account as the role, then you can either attach
-// a policy to the user (identical to the previous different account user),
-// or you can add the user as a principal directly in the role's trust policy.
-//
-// Using MFA with AssumeRole
-//
-// You can optionally include multi-factor authentication (MFA) information
-// when you call AssumeRole. This is useful for cross-account scenarios in which
-// you want to make sure that the user who is assuming the role has been authenticated
-// using an AWS MFA device. In that scenario, the trust policy of the role being
-// assumed includes a condition that tests for MFA authentication; if the caller
-// does not include valid MFA information, the request to assume the role is
-// denied. The condition in a trust policy that tests for MFA authentication
-// might look like the following example.
-//
-// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
-//
-// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
-// in the IAM User Guide.
-//
-// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
-// parameters. The SerialNumber value identifies the user's hardware or virtual
-// MFA device. The TokenCode is the time-based one-time password (TOTP) that
-// the MFA device produces.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation AssumeRole for usage and error information.
-//
-// Returned Error Codes:
-// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the policy document was too large. The error
-// message describes how big the policy document is, in packed form, as a percentage
-// of what the API allows.
-//
-// * ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
-func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
- req, out := c.AssumeRoleRequest(input)
- return out, req.Send()
-}
-
-// AssumeRoleWithContext is the same as AssumeRole with the addition of
-// the ability to pass a context and additional request options.
-//
-// See AssumeRole for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation.
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See AssumeRoleWithSAML for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithSAML method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. -// -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. The duration -// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). -// The default is 1 hour. 
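The AssumeRole operation documented above is most often consumed through the SDK's stscreds credential provider rather than called directly; a minimal sketch (the role ARN and account ID are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())

    // stscreds calls sts:AssumeRole under the hood and refreshes the
    // temporary credentials before they expire.
    creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role")

    // Any service client can then be built on the assumed-role credentials.
    svc := s3.New(sess, &aws.Config{Credentials: creds})
    out, err := svc.ListBuckets(nil)
    fmt.Println(out, err)
}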
-// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by the intersection of both the access policy -// of the role that is being assumed, and the policy that you pass. This means -// that both policies must grant the permission for the action to be allowed. -// This gives you a way to further restrict the permissions for the resulting -// temporary security credentials. You cannot use the passed policy to grant -// permissions that are in excess of those allowed by the access policy of the -// role that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider, and create -// an IAM role that specifies this SAML provider in its trust policy. -// -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. 
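For the SAML flow described above, the call itself carries the provider and role ARNs plus the base64-encoded assertion from the IdP; a minimal direct-call sketch (the ARNs and assertion value are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    // The SAML assertion authenticates the call; no long-term AWS
    // credentials are required, as the documentation above notes.
    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
        PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
        SAMLAssertion:   aws.String("base64-encoded-assertion"),
        DurationSeconds: aws.Int64(3600),
    })
    fmt.Println(out, err)
}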
-// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - return out, req.Send() -} - -// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithSAML for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See AssumeRoleWithWebIdentity for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If
-// you just want the service response, call the AssumeRoleWithWebIdentity method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
-// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
-func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
- op := &request.Operation{
- Name: opAssumeRoleWithWebIdentity,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &AssumeRoleWithWebIdentityInput{}
- }
-
- output = &AssumeRoleWithWebIdentityOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials for users who have been authenticated
-// in a mobile or web application with a web identity provider, such as Amazon
-// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
-// identity provider.
-//
-// For mobile applications, we recommend that you use Amazon Cognito. You can
-// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
-// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
-// identify a user and supply the user with a consistent identity throughout
-// the lifetime of an application.
-//
-// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
-// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
-// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
-// in the AWS SDK for iOS Developer Guide.
-//
-// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
-// credentials. Therefore, you can distribute an application (for example, on
-// mobile devices) that requests temporary security credentials without including
-// long-term AWS credentials in the application, and without deploying server-based
-// proxy services that use long-term AWS credentials. Instead, the identity
-// of the caller is validated by using a token from the web identity provider.
-// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
-// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// The temporary security credentials returned by this API consist of an access
-// key ID, a secret access key, and a security token. Applications can use these
-// temporary security credentials to sign calls to AWS service APIs.
-//
-// The credentials are valid for the duration that you specified when calling
-// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
-// a maximum of 3600 seconds (1 hour). The default is 1 hour.
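The web-identity flow exchanges an OIDC token for temporary credentials without any AWS credentials on the caller's side; a minimal sketch (the role ARN, session name, and token value are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    // The web identity token from the provider stands in for AWS credentials.
    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-role"),
        RoleSessionName:  aws.String("app-session"),
        WebIdentityToken: aws.String("oidc-token-from-provider"),
    })
    fmt.Println(out, err)
}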
-// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security -// credentials, and then using those credentials to make a request to AWS. -// -// -// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample -// apps that show how to invoke the identity providers, and then how to use -// the information from these providers to get and use temporary security -// credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). 
-// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithWebIdentity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the non-AWS identity provider -// (IDP) that was asked to verify the incoming identity token could not be reached. -// This is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the non-AWS identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See DecodeAuthorizationMessage for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. -// -// For example, if a user is not authorized to perform an action that he or -// she has requested, the request returns a Client.UnauthorizedOperation response -// (an HTTP 403 response). Some AWS actions additionally return an encoded message -// that can provide details about this authorization failure. -// -// Only certain AWS actions return an encoded authorization message. The documentation -// for an individual action indicates whether that action returns an encoded -// message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the action -// should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. 
For more information, see Determining Whether -// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. -// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation DecodeAuthorizationMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - return out, req.Send() -} - -// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DecodeAuthorizationMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetCallerIdentity for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetCallerIdentity method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetCallerIdentityRequest method. 
-// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM identity whose credentials are used to call -// the API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetFederationToken for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetFederationToken method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetFederationTokenRequest method. 
-// req, resp := client.GetFederationTokenRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
-func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
- op := &request.Operation{
- Name: opGetFederationToken,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetFederationTokenInput{}
- }
-
- output = &GetFederationTokenOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetFederationToken API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials (consisting of an access
-// key ID, a secret access key, and a security token) for a federated user.
-// A typical use is in a proxy application that gets temporary security credentials
-// on behalf of distributed applications inside a corporate network. Because
-// you must call the GetFederationToken action using the long-term security
-// credentials of an IAM user, this call is appropriate in contexts where those
-// credentials can be safely stored, usually in a server-based application.
-// For a comparison of GetFederationToken with the other APIs that produce temporary
-// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// If you are creating a mobile-based or browser-based app that can authenticate
-// users using a web identity provider like Login with Amazon, Facebook, Google,
-// or an OpenID Connect-compatible identity provider, we recommend that you
-// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
-// For more information, see Federation Through a Web-based Identity Provider
-// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
-//
-// The GetFederationToken action must be called by using the long-term AWS security
-// credentials of an IAM user. You can also call GetFederationToken using the
-// security credentials of an AWS root account, but we do not recommend it.
-// Instead, we recommend that you create an IAM user for the purpose of the
-// proxy application and then attach a policy to the IAM user that limits federated
-// users to only the actions and resources that they need access to. For more
-// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
-// in the IAM User Guide.
-//
-// The temporary security credentials that are obtained by using the long-term
-// credentials of an IAM user are valid for the specified duration, from 900
-// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
-// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
-// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
-//
-// The temporary security credentials created by GetFederationToken can be used
-// to make API calls to any AWS service with the following exceptions:
-//
-// * You cannot use these credentials to call any IAM APIs.
-//
-// * You cannot call any STS APIs except GetCallerIdentity.
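A minimal GetFederationToken sketch for the proxy-application pattern described above (the user name and inline policy are hypothetical; the scoped-down policy passed here is what the Permissions discussion just below combines with the calling IAM user's own policy):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    // Must be called with the long-term credentials of an IAM user.
    svc := sts.New(session.Must(session.NewSession()))

    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`

    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
        Name:            aws.String("federated-user"),
        Policy:          aws.String(policy),
        DurationSeconds: aws.Int64(43200), // the 12-hour default, made explicit
    })
    fmt.Println(out, err)
}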
-// -// Permissions -// -// The permissions for the temporary security credentials returned by GetFederationToken -// are determined by a combination of the following: -// -// * The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. -// -// * The policy that is passed as a parameter in the call. -// -// The passed policy is attached to the temporary security credentials that -// result from the GetFederationToken API call--that is, to the federated user. -// When the federated user makes an AWS request, AWS evaluates the policy attached -// to the federated user in combination with the policy or policies attached -// to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The -// passed policy cannot grant more permissions than those that are defined in -// the IAM user policy. -// -// A typical use case is that the permissions of the IAM user whose credentials -// are used to call GetFederationToken are designed to allow access to all the -// actions and resources that any federated user will need. Then, for individual -// users, you pass a policy to the operation that scopes down the permissions -// to a level that's appropriate to that individual user, using a policy that -// allows only a subset of permissions that are granted to the IAM user. -// -// If you do not pass a policy, the resulting temporary security credentials -// have no effective permissions. The only exception is when the temporary security -// credentials are used to access a resource that has a resource-based policy -// that specifically allows the federated user to access the resource. -// -// For more information about how permissions work, see Permissions for GetFederationToken -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. 
For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// See GetSessionToken for usage and error information. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetSessionToken method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled -// IAM users would need to call GetSessionToken and submit an MFA code that -// is associated with their MFA device. Using the temporary security credentials -// that are returned from the call, IAM users can then make programmatic calls -// to APIs that require MFA authentication. 
If you do not supply a correct MFA -// code, then the API returns an access denied error. For a comparison of GetSessionToken -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The GetSessionToken action must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify, from 900 seconds -// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default -// of 43200 seconds (12 hours); credentials that are created by using account -// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 -// seconds (1 hour), with a default of 1 hour. -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot call any IAM APIs unless MFA authentication information is -// included in the request. -// -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with root account credentials. -// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. -// -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with the account or -// IAM user whose credentials are used to call the action. If GetSessionToken -// is called using root account credentials, the temporary credentials have -// root account permissions. Similarly, if GetSessionToken is called using the -// credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetSessionToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide.
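[Editorial aside, not part of the vendored source: a minimal sketch of an MFA-protected GetSessionToken call through this client. It assumes long-term IAM user credentials are available to the default provider chain; the MFA serial number and token code below are placeholder values.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// GetSessionToken must be called with long-term credentials, so the
	// default provider chain (environment, shared config) supplies them here.
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder MFA device and code; DurationSeconds must be at least 900.
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:       aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned Credentials hold a temporary key pair plus a session token.
	fmt.Println(*out.Credentials.AccessKeyId, *out.Credentials.Expiration)
}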
-// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - return out, req.Send() -} - -// GetSessionTokenWithContext is the same as GetSessionToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that is used by third parties when assuming roles in - // their customers' accounts. For each role that the third party can assume, - // they should instruct their customers to ensure the role's trust policy checks - // for the external ID that the third party generated. Each time the third party - // assumes the role, they should pass the customer's external ID. The external - // ID is useful to help third parties bind a role to the customer who - // created it. For more information about the external ID, see How to Use an - // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format. - // - // This parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both (the intersection of) the access policy of the role that - // is being assumed, and the policy that you pass. This gives you a way to further - // restrict the permissions for the resulting temporary security credentials.
- // You cannot use the passed policy to grant permissions that are in excess - // of those allowed by the access policy of the role that is being assumed. - // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, - // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests using the temporary security credentials will expose the role session - // name to the external account in their CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. 
- TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse -type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. 
- AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. 
For more information, see - // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base-64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the Using IAM guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value.
-func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. -func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. 
- SubjectType *string `type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. -func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. 
For more information, - // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. 
- Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID Tokens this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. 
For more information about ARNs and how to use them in - // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// AWS credentials for API authentication. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. 
- // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // An XML document that contains the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. 
-func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The AWS account ID number of the account that owns or contains the calling - // entity. - Account *string `type:"string"` - - // The AWS ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity making the call. The values returned are those listed in the - // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds - // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained - // using AWS account (root) credentials are restricted to a maximum of 3600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using AWS account (root) credentials defaults to one - // hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that is passed with the GetFederationToken call - // and evaluated along with the policy or policies that are attached to the - // IAM user whose credentials are used to call GetFederationToken. The passed - // policy is used to scope down the permissions that are available to the IAM - // user, by allowing only a subset of the permissions that are granted to the - // IAM user. The passed policy cannot grant more permissions than those granted - // to the IAM user. The final permissions for the federated user are the most - // restrictive set based on the intersection of the passed policy and the IAM - // user policy. - // - // If you do not pass a policy, the resulting temporary security credentials - // have no effective permissions. The only exception is when the temporary security - // credentials are used to access a resource that has a resource-based policy - // that specifically allows the federated user to access the resource. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - // - // For more information about how permissions work, see Permissions for GetFederationToken - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). - Policy *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. 
-func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 - // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). - // If the duration is longer than one hour, the session for AWS account owners - // defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console - // and viewing the user's security credentials. 
- // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, and the user does not provide a code when requesting a set of - // temporary security credentials, the user will receive an "access denied" - // response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value.
-func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index 4010cc7..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,12 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = func(r *request.Request) { - switch r.Operation.Name { - case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: - r.Handlers.Sign.Clear() // these operations are unsigned - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index d2af518..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,124 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
-// -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To use the client for AWS Security Token Service you will first need -// to create a new instance of it. -// -// When creating a client for an AWS service you'll first need to have a Session -// already created. The Session provides configuration that can be shared -// between multiple service clients. Additional configuration can be applied to -// the Session and service's client when they are constructed. The aws package's -// Config type contains several fields such as Region for the AWS Region the -// client should make API requests to. The optional Config value can be provided -// as the variadic argument for Sessions and client creation. -// -// Once the service's client is created you can use it to make API requests to -// the AWS service. These clients are safe to use concurrently. -// -// // Create a session to share configuration, and load external configuration. -// sess := session.Must(session.NewSession()) -// -// // Create the service's client with the session. -// svc := sts.New(sess) -// -// See the SDK's documentation for more information on how to use service clients. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws package's Config type for more information on configuration options. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating the service's client. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -// -// Once the client is created you can make an API request to the service. -// Each API method takes an input parameter, and returns the service response -// and an error. -// -// The API method will document which error codes the service can return -// for the operation if the service models the API operation's errors. These -// errors will also be available as const strings prefixed with "ErrCode". -// -// result, err := svc.AssumeRole(params) -// if err != nil { -// // Cast err to awserr.Error to handle specific error codes. -// aerr, ok := err.(awserr.Error) -// if ok && aerr.Code() == <error code> { -// // Specific error code handling -// } -// return err -// } -// -// fmt.Println("AssumeRole result:") -// fmt.Println(result) -// -// Using the Client with Context -// -// The service's client also provides methods to make API requests with a Context -// value. This allows you to control the timeout and cancellation of pending -// requests. These methods also take request Option values as a variadic parameter to apply -// additional configuration to the API request.
-// -// ctx := context.Background() -// -// result, err := svc.AssumeRoleWithContext(ctx, params) -// -// See the request package documentation for more information on using Context pattern -// with the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index e24884e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the policy document was too large. The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". 
- // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index 1ee5839..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a STS client from just a session. -// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2011-06-15", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization.
-func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE deleted file mode 100644 index aade9a5..0000000 --- a/vendor/github.com/bgentry/go-netrc/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Original version Copyright © 2010 Fazlul Shahriar . Newer -portions Copyright © 2014 Blake Gentry . - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go deleted file mode 100644 index ea49987..0000000 --- a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go +++ /dev/null @@ -1,510 +0,0 @@ -package netrc - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -type tkType int - -const ( - tkMachine tkType = iota - tkDefault - tkLogin - tkPassword - tkAccount - tkMacdef - tkComment - tkWhitespace -) - -var keywords = map[string]tkType{ - "machine": tkMachine, - "default": tkDefault, - "login": tkLogin, - "password": tkPassword, - "account": tkAccount, - "macdef": tkMacdef, - "#": tkComment, -} - -type Netrc struct { - tokens []*token - machines []*Machine - macros Macros - updateLock sync.Mutex -} - -// FindMachine returns the Machine in n named by name. If a machine named by -// name exists, it is returned. If no Machine with name name is found and there -// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil -// is returned. -func (n *Netrc) FindMachine(name string) (m *Machine) { - // TODO(bgentry): not safe for concurrency - var def *Machine - for _, m = range n.machines { - if m.Name == name { - return m - } - if m.IsDefault() { - def = m - } - } - if def == nil { - return nil - } - return def -} - -// MarshalText implements the encoding.TextMarshaler interface to encode a -// Netrc into text format. -func (n *Netrc) MarshalText() (text []byte, err error) { - // TODO(bgentry): not safe for concurrency - for i := range n.tokens { - switch n.tokens[i].kind { - case tkComment, tkDefault, tkWhitespace: // always append these types - text = append(text, n.tokens[i].rawkind...) - default: - if n.tokens[i].value != "" { // skip empty-value tokens - text = append(text, n.tokens[i].rawkind...) 
- } - } - if n.tokens[i].kind == tkMacdef { - text = append(text, ' ') - text = append(text, n.tokens[i].macroName...) - } - text = append(text, n.tokens[i].rawvalue...) - } - return -} - -func (n *Netrc) NewMachine(name, login, password, account string) *Machine { - n.updateLock.Lock() - defer n.updateLock.Unlock() - - prefix := "\n" - if len(n.tokens) == 0 { - prefix = "" - } - m := &Machine{ - Name: name, - Login: login, - Password: password, - Account: account, - - nametoken: &token{ - kind: tkMachine, - rawkind: []byte(prefix + "machine"), - value: name, - rawvalue: []byte(" " + name), - }, - logintoken: &token{ - kind: tkLogin, - rawkind: []byte("\n\tlogin"), - value: login, - rawvalue: []byte(" " + login), - }, - passtoken: &token{ - kind: tkPassword, - rawkind: []byte("\n\tpassword"), - value: password, - rawvalue: []byte(" " + password), - }, - accounttoken: &token{ - kind: tkAccount, - rawkind: []byte("\n\taccount"), - value: account, - rawvalue: []byte(" " + account), - }, - } - n.insertMachineTokensBeforeDefault(m) - for i := range n.machines { - if n.machines[i].IsDefault() { - n.machines = append(append(n.machines[:i], m), n.machines[i:]...) - return m - } - } - n.machines = append(n.machines, m) - return m -} - -func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) { - newtokens := []*token{m.nametoken} - if m.logintoken.value != "" { - newtokens = append(newtokens, m.logintoken) - } - if m.passtoken.value != "" { - newtokens = append(newtokens, m.passtoken) - } - if m.accounttoken.value != "" { - newtokens = append(newtokens, m.accounttoken) - } - for i := range n.tokens { - if n.tokens[i].kind == tkDefault { - // found the default, now insert tokens before it - n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...) - return - } - } - // didn't find a default, just add the newtokens to the end - n.tokens = append(n.tokens, newtokens...) - return -} - -func (n *Netrc) RemoveMachine(name string) { - n.updateLock.Lock() - defer n.updateLock.Unlock() - - for i := range n.machines { - if n.machines[i] != nil && n.machines[i].Name == name { - m := n.machines[i] - for _, t := range []*token{ - m.nametoken, m.logintoken, m.passtoken, m.accounttoken, - } { - n.removeToken(t) - } - n.machines = append(n.machines[:i], n.machines[i+1:]...) - return - } - } -} - -func (n *Netrc) removeToken(t *token) { - if t != nil { - for i := range n.tokens { - if n.tokens[i] == t { - n.tokens = append(n.tokens[:i], n.tokens[i+1:]...) - return - } - } - } -} - -// Machine contains information about a remote machine. -type Machine struct { - Name string - Login string - Password string - Account string - - nametoken *token - logintoken *token - passtoken *token - accounttoken *token -} - -// IsDefault returns true if the machine is a "default" token, denoted by an -// empty name. -func (m *Machine) IsDefault() bool { - return m.Name == "" -} - -// UpdatePassword sets the password for the Machine m. -func (m *Machine) UpdatePassword(newpass string) { - m.Password = newpass - updateTokenValue(m.passtoken, newpass) -} - -// UpdateLogin sets the login for the Machine m. -func (m *Machine) UpdateLogin(newlogin string) { - m.Login = newlogin - updateTokenValue(m.logintoken, newlogin) -} - -// UpdateAccount sets the account for the Machine m.
-func (m *Machine) UpdateAccount(newaccount string) { - m.Account = newaccount - updateTokenValue(m.accounttoken, newaccount) -} - -func updateTokenValue(t *token, value string) { - oldvalue := t.value - t.value = value - newraw := make([]byte, len(t.rawvalue)) - copy(newraw, t.rawvalue) - t.rawvalue = append( - bytes.TrimSuffix(newraw, []byte(oldvalue)), - []byte(value)..., - ) -} - -// Macros contains all the macro definitions in a netrc file. -type Macros map[string]string - -type token struct { - kind tkType - macroName string - value string - rawkind []byte - rawvalue []byte -} - -// Error represents a netrc file parse error. -type Error struct { - LineNum int // Line number - Msg string // Error message -} - -// Error returns a string representation of error e. -func (e *Error) Error() string { - return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg) -} - -func (e *Error) BadDefaultOrder() bool { - return e.Msg == errBadDefaultOrder -} - -const errBadDefaultOrder = "default token must appear after all machine tokens" - -// scanLinesKeepPrefix is a split function for a Scanner that returns each line -// of text. The returned token may include newlines if they are before the -// first non-space character. The returned line may be empty. The end-of-line -// marker is one optional carriage return followed by one mandatory newline. In -// regular expression notation, it is `\r?\n`. The last non-empty line of -// input will be returned even if it has no newline. -func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - // Skip leading spaces. - start := 0 - for width := 0; start < len(data); start += width { - var r rune - r, width = utf8.DecodeRune(data[start:]) - if !unicode.IsSpace(r) { - break - } - } - if i := bytes.IndexByte(data[start:], '\n'); i >= 0 { - // We have a full newline-terminated line. - return start + i, data[0 : start+i], nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. - return 0, nil, nil -} - -// scanTokensKeepPrefix is a split function for a Scanner that returns each -// space-separated word of text, with prefixing spaces included. It will never -// return an empty string. The definition of space is set by unicode.IsSpace. -// -// Adapted from bufio.ScanWords(). -func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { - // Skip leading spaces. - start := 0 - for width := 0; start < len(data); start += width { - var r rune - r, width = utf8.DecodeRune(data[start:]) - if !unicode.IsSpace(r) { - break - } - } - if atEOF && len(data) == 0 || start == len(data) { - return len(data), data, nil - } - if len(data) > start && data[start] == '#' { - return scanLinesKeepPrefix(data, atEOF) - } - // Scan until space, marking end of word. - for width, i := 0, start; i < len(data); i += width { - var r rune - r, width = utf8.DecodeRune(data[i:]) - if unicode.IsSpace(r) { - return i, data[:i], nil - } - } - // If we're at EOF, we have a final, non-empty, non-terminated word. Return it. - if atEOF && len(data) > start { - return len(data), data, nil - } - // Request more data.
- return 0, nil, nil -} - -func newToken(rawb []byte) (*token, error) { - _, tkind, err := bufio.ScanWords(rawb, true) - if err != nil { - return nil, err - } - var ok bool - t := token{rawkind: rawb} - t.kind, ok = keywords[string(tkind)] - if !ok { - trimmed := strings.TrimSpace(string(tkind)) - if trimmed == "" { - t.kind = tkWhitespace // whitespace-only, should happen only at EOF - return &t, nil - } - if strings.HasPrefix(trimmed, "#") { - t.kind = tkComment // this is a comment - return &t, nil - } - return &t, fmt.Errorf("keyword expected; got " + string(tkind)) - } - return &t, nil -} - -func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { - if scanner.Scan() { - raw := scanner.Bytes() - pos += bytes.Count(raw, []byte{'\n'}) - return raw, strings.TrimSpace(string(raw)), pos, nil - } - if err := scanner.Err(); err != nil { - return nil, "", pos, &Error{pos, err.Error()} - } - return nil, "", pos, nil -} - -func parse(r io.Reader, pos int) (*Netrc, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} - - defaultSeen := false - var currentMacro *token - var m *Machine - var t *token - scanner := bufio.NewScanner(bytes.NewReader(b)) - scanner.Split(scanTokensKeepPrefix) - - for scanner.Scan() { - rawb := scanner.Bytes() - if len(rawb) == 0 { - break - } - pos += bytes.Count(rawb, []byte{'\n'}) - t, err = newToken(rawb) - if err != nil { - if currentMacro == nil { - return nil, &Error{pos, err.Error()} - } - currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) - continue - } - - if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) { - // if macro rawvalue + rawb would contain \n\n, then macro def is over - currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n") - nrc.macros[currentMacro.macroName] = currentMacro.value - currentMacro = nil - } - - switch t.kind { - case tkMacdef: - if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - currentMacro = t - case tkDefault: - if defaultSeen { - return nil, &Error{pos, "multiple default token"} - } - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - m = new(Machine) - m.Name = "" - defaultSeen = true - case tkMachine: - if defaultSeen { - return nil, &Error{pos, errBadDefaultOrder} - } - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - m = new(Machine) - if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Name - m.nametoken = t - case tkLogin: - if m == nil || m.Login != "" { - return nil, &Error{pos, "unexpected token login "} - } - if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Login - m.logintoken = t - case tkPassword: - if m == nil || m.Password != "" { - return nil, &Error{pos, "unexpected token password"} - } - if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Password - m.passtoken = t - case tkAccount: - if m == nil || m.Account != "" { - return nil, &Error{pos, "unexpected token account"} - } - if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Account - m.accounttoken = t - } - - nrc.tokens = append(nrc.tokens, t) - } - - if err := scanner.Err(); 
err != nil { - return nil, err - } - - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - return &nrc, nil -} - -// ParseFile opens the file at filename and then passes its io.Reader to -// Parse(). -func ParseFile(filename string) (*Netrc, error) { - fd, err := os.Open(filename) - if err != nil { - return nil, err - } - defer fd.Close() - return Parse(fd) -} - -// Parse parses from the Reader r as a netrc file and returns the set of -// machine information and macros defined in it. The ``default'' machine, -// which is intended to be used when no machine name matches, is identified -// by an empty machine name. There can be only one ``default'' machine. -// -// If there is a parsing error, an Error is returned. -func Parse(r io.Reader) (*Netrc, error) { - return parse(r, 1) -} - -// FindMachine parses the netrc file identified by filename and returns the -// Machine named by name. If a problem occurs parsing the file at filename, an -// error is returned. If a machine named by name exists, it is returned. If no -// Machine with name name is found and there is a ``default'' machine, the -// ``default'' machine is returned. Otherwise, nil is returned. -func FindMachine(filename, name string) (m *Machine, err error) { - n, err := ParseFile(filename) - if err != nil { - return nil, err - } - return n.FindMachine(name), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE deleted file mode 100644 index 37d60fc..0000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -MIT License - -Copyright (c) 2017 Blake Gentry - -This license applies to the non-Windows portions of this library. The Windows -portion maintains its own Apache 2.0 license. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS deleted file mode 100644 index ff177f6..0000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License.
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [2013] [the CloudFoundry Authors] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md deleted file mode 100644 index fceda75..0000000 --- a/vendor/github.com/bgentry/speakeasy/Readme.md +++ /dev/null @@ -1,30 +0,0 @@ -# Speakeasy - -This package provides cross-platform Go (#golang) helpers for taking user input -from the terminal while not echoing the input back (similar to `getpasswd`). The -package uses syscalls to avoid any dependence on cgo, and is therefore -compatible with cross-compiling. - -[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] - -## Unicode - -Multi-byte unicode characters work successfully on Mac OS X. On Windows, -however, this may be problematic (as is UTF in general on Windows). Other -platforms have not been tested. - -## License - -The code herein was not written by me, but was compiled from two separate open -source packages. Unix portions were imported from [gopass][gopass], while -Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s -[Windows terminal helpers][cf-ui-windows]. - -The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly -from the source (though I attempted to fill in the correct owner in the -boilerplate copyright notice). - -[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" -[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" -[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" -[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go deleted file mode 100644 index 71c1dd1..0000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy.go +++ /dev/null @@ -1,49 +0,0 @@ -package speakeasy - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Ask the user to enter a password with input hidden. prompt is a string to -// display before the user's input. Returns the provided password, or an error -// if the command failed. -func Ask(prompt string) (password string, err error) { - return FAsk(os.Stdout, prompt) -} - -// FAsk is the same as Ask, except it is possible to specify the file to write -// the prompt to. If 'nil' is passed as the writer, no prompt will be written. -func FAsk(wr io.Writer, prompt string) (password string, err error) { - if wr != nil && prompt != "" { - fmt.Fprint(wr, prompt) // Display the prompt. - } - password, err = getPassword() - - // Carriage return after the user input. 
- if wr != nil { - fmt.Fprintln(wr, "") - } - return -} - -func readline() (value string, err error) { - var valb []byte - var n int - b := make([]byte, 1) - for { - // read one byte at a time so we don't accidentally read extra bytes - n, err = os.Stdin.Read(b) - if err != nil && err != io.EOF { - return "", err - } - if n == 0 || b[0] == '\n' { - break - } - valb = append(valb, b[0]) - } - - return strings.TrimSuffix(string(valb), "\r"), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go deleted file mode 100644 index d99fda1..0000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// based on https://code.google.com/p/gopass -// Author: johnsiilver@gmail.com (John Doak) -// -// Original code is based on code by RogerV in the golang-nuts thread: -// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package speakeasy - -import ( - "fmt" - "os" - "os/signal" - "strings" - "syscall" -) - -const sttyArg0 = "/bin/stty" - -var ( - sttyArgvEOff = []string{"stty", "-echo"} - sttyArgvEOn = []string{"stty", "echo"} -) - -// getPassword gets input hidden from the terminal from a user. This is -// accomplished by turning off terminal echo, reading input from the user and -// finally turning on terminal echo. -func getPassword() (password string, err error) { - sig := make(chan os.Signal, 10) - brk := make(chan bool) - - // File descriptors for stdin, stdout, and stderr. - fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} - - // Setup notifications of termination signals to channel sig, create a process to - // watch for these signals so we can turn back on echo if need be. - signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, - syscall.SIGTERM) - go catchSignal(fd, sig, brk) - - // Turn off the terminal echo. - pid, err := echoOff(fd) - if err != nil { - return "", err - } - - // Turn on the terminal echo and stop listening for signals. - defer signal.Stop(sig) - defer close(brk) - defer echoOn(fd) - - syscall.Wait4(pid, nil, 0, nil) - - line, err := readline() - if err == nil { - password = strings.TrimSpace(line) - } else { - err = fmt.Errorf("failed during password entry: %s", err) - } - - return password, err -} - -// echoOff turns off the terminal echo. -func echoOff(fd []uintptr) (int, error) { - pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) - if err != nil { - return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) - } - return pid, nil -} - -// echoOn turns back on the terminal echo. -func echoOn(fd []uintptr) { - // Turn on the terminal echo. - pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) - if e == nil { - syscall.Wait4(pid, nil, 0, nil) - } -} - -// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn -// terminal echo back on before the program ends. Otherwise the user is left -// with echo off on their terminal. 
-func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { - select { - case <-sig: - echoOn(fd) - os.Exit(-1) - case <-brk: - } -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go deleted file mode 100644 index c2093a8..0000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package speakeasy - -import ( - "syscall" -) - -// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx -const ENABLE_ECHO_INPUT = 0x0004 - -func getPassword() (password string, err error) { - var oldMode uint32 - - err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) - if err != nil { - return - } - - var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) - - err = setConsoleMode(syscall.Stdin, newMode) - defer setConsoleMode(syscall.Stdin, oldMode) - if err != nil { - return - } - - return readline() -} - -func setConsoleMode(console syscall.Handle, mode uint32) (err error) { - dll := syscall.MustLoadDLL("kernel32") - proc := dll.MustFindProc("SetConsoleMode") - r, _, err := proc.Call(uintptr(console), uintptr(mode)) - - if r == 0 { - return err - } - return nil -} diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86..0000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 08b2e4a..0000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. 
- -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Wildcards `>=1.x`, `<=2.5.x` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -Note that spaces between the operator and the version will be gracefully tolerated. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. - -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" 
- -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c..0000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
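The MarshalJSON/UnmarshalJSON pair defined in this file lets Version round-trip through encoding/json as a plain string; a short sketch (the `release` struct is illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

type release struct {
	Version semver.Version `json:"version"`
}

func main() {
	var r release
	// UnmarshalJSON parses the string form via semver.Parse.
	if err := json.Unmarshal([]byte(`{"version":"1.2.3-rc.1"}`), &r); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"version":"1.2.3-rc.1"}
}
```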
-func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json deleted file mode 100644 index 1cf8ebd..0000000 --- a/vendor/github.com/blang/semver/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "author": "blang", - "bugs": { - "URL": "https://github.com/blang/semver/issues", - "url": "https://github.com/blang/semver/issues" - }, - "gx": { - "dvcsimport": "github.com/blang/semver" - }, - "gxVersion": "0.10.0", - "language": "go", - "license": "MIT", - "name": "semver", - "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.5.1" -} - diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d..0000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. 
-// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. 
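Note that the `Range usage` snippet in the README earlier assigns to `range`, a reserved word in Go, so it does not compile as written; a working sketch of ParseRange as defined above:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3")
	// ParseRange returns a Range, which is simply a func(Version) bool.
	inRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(inRange(v)) // true: 1.2.3 satisfies >1.0.0 <2.0.0
}
```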
-func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 
1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842..0000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) 
- } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// 
specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
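ParseTolerant, defined above, normalizes loose inputs before handing them to Parse; a brief sketch of the behavior its comment describes:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ParseTolerant trims whitespace and the "v" prefix and pads the
	// missing patch component, so both inputs normalize to 1.2.0.
	v1, _ := semver.ParseTolerant("v1.2")
	v2, _ := semver.ParseTolerant(" 1.2.0 ")
	fmt.Println(v1.EQ(v2)) // true
}
```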
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880..0000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
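The Versions collection defined next implements sort.Interface, and the package-level Sort helper below wraps it; for example:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.1.0"),
		semver.MustParse("1.0.0-rc.1"),
		semver.MustParse("1.0.0"),
	}
	semver.Sort(vs)
	fmt.Println(vs) // [1.0.0-rc.1 1.0.0 1.1.0]: prereleases sort before releases
}
```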
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802..0000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index c836416..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 8a4a658..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. 
The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly.
- if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 1fe3cf3..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. 
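As the build constraints above spell out, this safe stub is compiled in on Google App Engine, under GopherJS, or when the safe tag is set; for a project importing spew that looks like:

```bash
# Force the reflection-only code path; the unsafe-based bypass of
# reflect's visibility rules is then disabled.
go build -tags safe ./...
```

In that mode, unexported values are printed with whatever access plain reflection allows, as the stub below describes.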
-func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 7c519ff..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors - // the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values.
- if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. 
It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
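This surrogate-key sorting is what the SortKeys and SpewKeys options documented in config.go (below) switch on; a minimal usage sketch against the public spew API:

```go
package main

import (
	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := spew.ConfigState{Indent: "  ", SortKeys: true, SpewKeys: true}
	// With SortKeys enabled, map keys are ordered before printing,
	// yielding deterministic, diffable output regardless of Go's
	// randomized map iteration order.
	cfg.Dump(map[string]int{"b": 2, "a": 1, "c": 3})
}
```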
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Fprintf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with c. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -}
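The ConfigState API above lends itself to a dedicated instance per use site rather than the shared global. As a minimal sketch of a deterministic, diff-friendly dump using only the fields and methods documented above (the `server` type and its values are hypothetical):

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// server is a hypothetical type used purely for illustration.
type server struct {
	Name  string
	Ports []int
	Tags  map[string]string
}

func main() {
	// A dedicated ConfigState keeps these settings local instead of
	// mutating the shared spew.Config global.
	cs := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true, // deterministic map ordering for diffable output
		DisablePointerAddresses: true, // omit addresses that change between runs
		DisableCapacities:       true, // omit slice/map capacities as well
	}

	s := server{Name: "web-1", Ports: []int{80, 443}, Tags: map[string]string{"env": "dev"}}
	cs.Fdump(os.Stdout, s)
}
```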
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types is as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true.
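The same options can be applied through the package-level spew.Config that all top-level functions consult. A short sketch (the nested test data is hypothetical):

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Adjust the shared global; every top-level spew call sees these settings.
	spew.Config.Indent = "\t"
	spew.Config.SortKeys = true // deterministic ordering for map keys
	spew.Config.MaxDepth = 2    // stop descending after two levels

	// Hypothetical nested test data.
	nested := map[string][]map[string]int{
		"a": {{"x": 1}},
		"b": {{"y": 2}},
	}
	spew.Dump(nested)
}
```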
- -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
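A self-contained sketch of those verb combinations in action (the `flag` type is hypothetical); unsupported verbs fall through to the standard fmt package:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// flag is a hypothetical type used purely for illustration.
type flag struct {
	Name    string
	Aliases []string
}

func main() {
	f := &flag{Name: "verbose", Aliases: []string{"v"}}

	// %v stays compact, %+v adds pointer addresses, %#v adds types.
	spew.Printf("compact:   %v\n", f)
	spew.Printf("addresses: %+v\n", f)
	spew.Printf("types:     %#v\n", f)

	// The same value through plain fmt, for comparison.
	fmt.Printf("plain fmt: %v\n", f)
}
```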
- -See the Index for the full list of convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index df1d582..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option.
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index c49875b..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags.
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -}
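Although the convenience wrappers are the usual entry point, the returned fmt.Formatter can also be handed to any standard fmt (or log) call directly; a minimal sketch (the `cfg` value is hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"retries": 3, "timeout": 30} // hypothetical value

	// Wrap once, then pass to any fmt-style call; %v/%+v/%#v are handled
	// by spew, other verbs fall through to the standard fmt package.
	f := spew.NewFormatter(cfg)
	fmt.Printf("config: %+v\n", f)
	log.Printf("config: %#v", f)
}
```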
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details.
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. 
- -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index ac034e5..0000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -.PHONY: build test bench vet - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. -test.benchmem - -vet: - go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index e67d51f..0000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,746 +0,0 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -[简体中文](README_ZH.md) - -## Features - -- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursive values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -To use a tagged revision: - - go get gopkg.in/ini.v1 - -To use with latest changes: - - go get github.com/go-ini/ini - -Please add the `-u` flag to update in the future. - -### Testing - -If you want to test on your machine, please apply the `-t` flag: - - go get -t gopkg.in/ini.v1 - -Please add the `-u` flag to update in the future. - -## Getting Started - -### Loading from data sources - -A **Data Source** is either raw data in type `[]byte`, a file name with type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) -``` - -Or start with an empty object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error. - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -The cool thing is, whenever the file is available to load while you're calling the `Reload` method, it will be counted as usual.
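Putting the loading pieces together, a minimal end-to-end sketch (the `app.ini` file, its `[server]` section, and the key names are hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	// Hypothetical app.ini with a [server] section containing host/port keys.
	cfg, err := ini.Load("app.ini")
	if err != nil {
		log.Fatalf("failed to read config: %v", err)
	}

	host := cfg.Section("server").Key("host").String()
	port, err := cfg.Section("server").Key("port").Int()
	if err != nil {
		port = 8080 // fall back to a default when the key is missing or malformed
	}
	fmt.Printf("listening on %s:%d\n", host, port)
}
```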
-skip-host-cache -skip-name-resolve -``` - -By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf") -``` - -The value of those keys is always `true`, and when you save to a file, they will be kept in the same format as you read them. - -To generate such keys in your program, you could use `NewBooleanKey`: - -```go -key, err := sec.NewBooleanKey("skip-host-cache") -``` - -#### Comment - -Take care that the following formats will be treated as comments: - -1. Line begins with `#` or `;` -2. Words after `#` or `;` -3. Words after section name (i.e. words after `[some section name]`) - -If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. - -Alternatively, you can use the following `LoadOptions` to completely ignore inline comments: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini") -``` - -### Working with sections - -To get a section, you would need to: - -```go -section, err := cfg.GetSection("section name") -``` - -As a shortcut for the default section, just give an empty string as the name: - -```go -section, err := cfg.GetSection("") -``` - -When you're pretty sure the section exists, the following code could make your life easier: - -```go -section := cfg.Section("section name") -``` - -What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. - -To create a new section: - -```go -err := cfg.NewSection("new section") -``` - -To get a list of sections or section names: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### Working with keys - -To get a key under a section: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -The same rule applies to key operations: - -```go -key := cfg.Section("").Key("key name") -``` - -To check if a key exists: - -```go -yes := cfg.Section("").HasKey("key name") -``` - -To create a new key: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -To get a list of keys or key names: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -To get a clone hash of keys and corresponding values: - -```go -hash := cfg.Section("").KeysHash() -``` - -### Working with values - -To get a string value: - -```go -val := cfg.Section("").Key("key name").String() -``` - -To validate a key value on the fly: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance): - -```go -val := cfg.Section("").Key("key name").Value() -``` - -To check if a raw value exists: - -```go -yes := cfg.Section("").HasValue("test value") -``` - -To get values of other types: - -```go -// For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, 
err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods starting with Must also accept one argument as a default value, -// used when the key is not found or the value fails to parse to the given type. -// The exception is MustString, which always requires a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value is three lines long? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Not a problem! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -That's cool, how about continuation lines? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -Well, I hate continuation lines, how do I disable that? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -Holy crap! - -Note that single and double quotes around values will be stripped: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -That's all? Hmm, no. - -#### Helper methods for working with values - -To get a value from given candidates: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -The default value is returned if the value of the key is not among the candidates you give, and the default value does not need to be one of the candidates.
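To make that fallback concrete, here is a minimal, hypothetical sketch (the `LEVEL` key and the candidate list are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("LEVEL = extreme"))
	if err != nil {
		panic(err)
	}
	// "extreme" is not among the candidates, so In falls back to the
	// default "info", which itself is not one of the candidates either.
	fmt.Println(cfg.Section("").Key("LEVEL").In("info", []string{"debug", "warn"})) // info
}
```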
- -To validate a value in a given range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -##### Auto-split values into a slice - -To use the type's zero value for invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -To exclude invalid values from the result slice: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -Or to return nothing but an error when there are invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - -### Save your configuration - -Finally, it's time to save your configuration somewhere. - -A typical way to save configuration is writing it to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -Another way to save is writing to an `io.Writer`: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -By default, spaces are used to align the "=" sign between keys and values; to disable that: - -```go -ini.PrettyFormat = false -``` - -## Advanced Usage - -### Recursive Values - -For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child Sections - -You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look in its parent section, and so on, until there is no parent section.
- -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### Retrieve parent keys available to a child section - -```go -cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] -``` - -### Unparseable Sections - -Sometimes, you have sections that do not contain key-value pairs but raw content; to handle such cases, you can use `LoadOptions.UnparseableSections`: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS] -<1> This slide has the fuel listed in the wrong units `)) - -body := cfg.Section("COMMENTS").Body() - -/* --- start --- -<1> This slide has the fuel listed in the wrong units ------- end --- */ -``` - -### Auto-increment Key Names - -If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section has its own independent counter. - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### Map To Struct - -Want a more object-oriented way to play with INI? Cool. - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // Things can be simpler. - err = ini.MapTo(p, "path/to/ini") - // ... - - // Just map a section? Fine. - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -Can I have a default value for a field? Absolutely. - -Assign it before you map to the struct. The value will be kept as-is if the key is not present or has the wrong type. - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -It's really cool, but what's the point if you can't give me my file back from the struct? - -### Reflect From Struct - -Why not? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string `ini:"places,omitempty"` - None []int `ini:",omitempty"` -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -So, what do I get? - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -places = HangZhou,Boston -``` - -#### Name Mapper - -To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names. - -There are 2 built-in name mappers: - -- `AllCapsUnderscore`: it converts to the format `ALL_CAPS_UNDERSCORE` then matches the section or key. -- `TitleUnderscore`: it converts to the format `title_underscore` then matches the section or key.
- -To use them: - -```go -type Info struct { - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -The same name mapper rules apply to the `ini.ReflectFromWithMapper` function. - -#### Value Mapper - -To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: - -```go -type Env struct { - Foo string `ini:"foo"` -} - -func main() { - cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")) - cfg.ValueMapper = os.ExpandEnv - // ... - env := &Env{} - err = cfg.Section("env").MapTo(env) -} -``` - -This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. - -#### Other Notes On Map/Reflect - -Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome. - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## Getting Help - -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- [File An Issue](https://github.com/go-ini/ini/issues/new) - -## FAQs - -### What does the `BlockMode` field do? - -By default, the library lets you read and write values, so it needs a lock to keep your data safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster. - -### Why another INI library? - -Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. - -To make those changes I had to break the API, so it was safer to keep it in another place, and to start using `gopkg.in` to version the package this time. (PS: the import path is shorter.) - -## License - -This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
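As a small, hypothetical illustration of the `BlockMode` FAQ above (the `PORT` key is invented; this is a sketch of the read-only pattern, not part of the library's documentation):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("PORT = 8080"))
	if err != nil {
		panic(err)
	}
	// From here on the config is only read, never written,
	// so disabling the lock is safe and speeds up reads.
	cfg.BlockMode = false
	fmt.Println(cfg.Section("").Key("PORT").MustInt(80)) // 8080
}
```

The trade-off is simply lock overhead versus safety: leave `BlockMode` at its default `true` whenever any goroutine may still write.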
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md deleted file mode 100644 index 0cf4194..0000000 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ /dev/null @@ -1,733 +0,0 @@ -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursive values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types directly. -- Read and **write** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -To use a specific version: - - go get gopkg.in/ini.v1 - -To use the latest version: - - go get github.com/go-ini/ini - -Add the `-u` flag to update in the future. - -### Testing - -If you want to run the tests on your own machine, apply the `-t` flag: - - go get -t gopkg.in/ini.v1 - -Add the `-u` flag to update in the future. - -## Getting Started - -### Loading from data sources - -A **data source** is either raw data of type `[]byte`, a file path of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type of data source returns an error directly. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) -``` - -Or start with an empty file object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide at the beginning which data sources need to be loaded, you can still use **Append()** to load them when needed. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -When you want to load a list of files but cannot be sure which of them do not exist, you can call `LooseLoad` to ignore the missing ones (`Load` would return an error for nonexistent files): - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -Even better, if files that did not exist before suddenly appear on a later call to the `Reload` method, they will be loaded normally. - -#### Ignore case of key names - -Sometimes mixed-case section and key names are really annoying; in that case you can use `InsensitiveLoad` to force all section and key names to lowercase while parsing: - -```go -cfg, err := ini.InsensitiveLoad("filename") -//... - -// sec1 and sec2 point to the same section object -sec1, err := cfg.GetSection("Section") -sec2, err := cfg.GetSection("SecTIOn") - -// key1 and key2 point to the same key object -key1, err := sec1.GetKey("Key") -key2, err := sec2.GetKey("KeY") -``` - -#### MySQL-like boolean keys - -MySQL configuration files may contain boolean keys without a concrete value: - -```ini -[mysqld] -... -skip-host-cache -skip-name-resolve -``` - -By default this is considered a missing value that cannot be parsed, but such keys can be handled with advanced load options: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf") -``` - -The value of these keys is always `true`, and only the key name is written out when saving to a file. - -If you want to generate such keys programmatically, you can use `NewBooleanKey`: - -```go -key, err := sec.NewBooleanKey("skip-host-cache") -``` - -#### About comments - -Content in the following cases is treated as comments: - -1. All lines beginning with `#` or `;` -2. Everything after `#` or `;` -3. 
Text after a section tag (i.e. everything after `[section name]`) - -If you want a value to contain `#` or `;`, wrap it with ``` ` ``` or ``` """ ```. - -Besides that, you can also completely ignore inline comments via `LoadOptions`: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini") -``` - -### Working with sections (Section) - -To get the specified section: - -```go -section, err := cfg.GetSection("section name") -``` - -If you want the default section, use an empty string in place of the section name: - -```go -section, err := cfg.GetSection("") -``` - -When you are very sure a section exists, you can use this shortcut: - -```go -section := cfg.Section("section name") -``` - -And what happens if you judged wrong and the section does not actually exist? No worries, it will automatically create and return a corresponding section object for you. - -To create a section: - -```go -err := cfg.NewSection("new section") -``` - -To get all section objects or names: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### Working with keys (Key) - -To get a key under a section: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -Just like with sections, you can also get a key directly and skip the error handling: - -```go -key := cfg.Section("").Key("key name") -``` - -To check whether a key exists: - -```go -yes := cfg.Section("").HasKey("key name") -``` - -To create a new key: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -To get all keys or key names under a section: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -To get a clone of all key-value pairs under a section: - -```go -hash := cfg.Section("").KeysHash() -``` - -### Working with values (Value) - -To get a value of type string: - -```go -val := cfg.Section("").Key("key name").String() -``` - -To process and validate a value with a custom function while getting it: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -If you do not need any automatic transformation of the value (such as recursive reading), you can get the raw value directly (this gives the best performance): - -```go -val := cfg.Section("").Key("key name").Value() -``` - -To check whether a raw value exists: - -```go -yes := cfg.Section("").HasValue("test value") -``` - -To get values of other types: - -```go -// Rules for booleans: -// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods whose names start with Must accept one argument of the same type as a default value; -// when the key does not exist or the conversion fails, that default value is returned directly. -// However, the MustString method must always be given a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value spans several lines? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Huh? No big deal! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -Awesome! And what if a value is too long for one line and I want to continue it on a second line?
- -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -But sometimes I find joining the lines together pretty pointless; how do I stop the lines from being joined automatically? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -Wow, awesome! - -Note that single and double quotes around a value are stripped automatically: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -Is that all? Haha, of course not. - -#### Helper methods for working with values - -To set candidate values when getting a key value: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -If the value obtained is not any one of the candidates, the default value is returned, and the default value does not need to be one of the candidates. - -To validate that a value is within a specified range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -##### Auto-split a key value into a slice - -To substitute the type's zero value for invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -To strip invalid inputs from the result slice: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -To return an error directly when there is invalid input: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - -### Saving configuration - -The moment has finally come; it's time to save your configuration. - -The basic approach is to write the configuration out to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -A more advanced approach is to write to anything that implements the `io.Writer` interface: - -```go -// ... 
-cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -By default, spaces are used to align the equals signs between keys and values to prettify the output; the following disables that behavior: - -```go -ini.PrettyFormat = false -``` - -## Advanced Usage - -### Recursive values - -While any key value is being read, the special syntax `%(<name>)s` is applied, where `<name>` can be the name of a key in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding key value, or by an empty string if the specified key does not exist. You can nest up to 99 levels of recursion. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child sections - -You can use `.` in section names to express a parent-child relationship between two or more sections. If a key does not exist in a child section, it is looked up again in the parent section, and so on until there is no parent section. - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### Retrieve all key names available from parent sections - -```go -cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] -``` - -### Unparseable sections - -If you come across some rather special sections that contain raw text with no fixed format instead of the usual key-value pairs, you can handle them with `LoadOptions.UnparseableSections`: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS] -<1> This slide has the fuel listed in the wrong units `)) - -body := cfg.Section("COMMENTS").Body() - -/* --- start --- -<1> This slide has the fuel listed in the wrong units ------- end --- */ -``` - -### Auto-increment key names - -If a key name in the data source is `-`, the key is considered to use the special auto-increment key name syntax. The counter starts from 1, and each section has its own independent counter. - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### Map to struct - -Want a more object-oriented way to play with INI? Good idea. - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // Things can be this simple. - err = ini.MapTo(p, "path/to/ini") - // ... - - // Huh? Only need to map one section? - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -How do you set a default value for a struct field? Easy: just assign the field a value before mapping. If the key is not found or has the wrong type, that value will not be changed. - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -Playing with INI like this is really cool! But what's the use if it can't give me my original configuration file back? - -### Reflect from struct - -Well, did I say it can't? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string `ini:"places,omitempty"` - None []int `ini:",omitempty"` -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... 
-} -``` - -Look, a miracle happened. - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -places = HangZhou,Boston -``` - -#### Name Mapper - -To save your time and simplify your code, this library supports a name mapper of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which is responsible for the mapping between struct field names and section and key names. - -There are currently 2 built-in name mappers: - -- `AllCapsUnderscore`: converts field names to the format `ALL_CAPS_UNDERSCORE` before matching section and key names. -- `TitleUnderscore`: converts field names to the format `title_underscore` before matching section and key names. - -Usage: - -```go -type Info struct{ - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -The same rules also apply when using the `ini.ReflectFromWithMapper` function. - -#### Value Mapper - -A value mapper lets a custom function automatically expand the contents of a value, for example to read environment variables at run time: - -```go -type Env struct { - Foo string `ini:"foo"` -} - -func main() { - cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")) - cfg.ValueMapper = os.ExpandEnv - // ... - env := &Env{} - err = cfg.Section("env").MapTo(env) -} -``` - -In this example, `env.Foo` will be the value of the environment variable `MY_VAR` obtained at run time. - -#### Other notes on map/reflect - -Any embedded struct is by default treated as a separate section, and no parent-child section relations are created automatically: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -Example configuration file: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -Very well, but I insist that the embedded struct live in the same section, too. Fine, all roads lead to Rome! - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -Example configuration file: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## Getting Help - -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- [File an Issue](https://github.com/go-ini/ini/issues/new) - -## FAQs - -### What is the `BlockMode` field? - -By default, the library uses a locking mechanism for your read and write operations to keep the data safe. But in some cases you are very sure you will only be reading. Then you can set `cfg.BlockMode = false` to make read operations roughly **50-70%** faster. - -### Why write another INI parsing library? - -Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code in a more Go-like style. Also, when you set `cfg.BlockMode = false`, there is a performance gain of roughly **10-30%**. - -To make those changes I had to break the API, so starting a new repository was the safest approach. Besides, this library uses `gopkg.in` directly for versioned releases. (The real truth is that the import path got shorter.) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe74..0000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License.
- -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index f8827dd..0000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -const ( - // Name for default section. You can use this constant or the string literal. - // In most cases, an empty string is all you need to access the section. - DEFAULT_SECTION = "DEFAULT" - - // Maximum allowed depth when recursively substituting variable names. - _DEPTH_VALUES = 99 - _VERSION = "1.28.1" -) - -// Version returns current package version literal. -func Version() string { - return _VERSION -} - -var ( - // Delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows - // at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Indicate whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - - // Explicitly write DEFAULT section header - DefaultHeader = false - - // Indicate whether to put a line between sections - PrettySection = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is an interface that returns an object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -type bytesReadCloser struct { - reader io.Reader -} - -func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { - return rc.reader.Read(p) -} - -func (rc *bytesReadCloser) Close() error { - return nil -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with a Close method. 
-type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -// File represents a combination of one or more INI files in memory. -type File struct { - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - // Make sure data is safe in multiple goroutines. - lock sync.RWMutex - - // Allow combination of multiple data sources. - dataSources []dataSource - // Actual data is stored here. - sections map[string]*Section - - // To keep data in order. - sectionList []string - - options LoadOptions - - NameMapper - ValueMapper -} - -// newFile initializes a File object with the given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return an error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value. - IgnoreInlineComment bool - // AllowBooleanKeys indicates whether to allow boolean-type keys or treat them as keys with a missing value. - // This type of key is mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with the same name under the same section. - AllowShadows bool - // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string -} - -// LoadSources allows the caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be a mix of file names (string type) and raw data in []byte. -// It will return an error if the list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly the same functionality as the Load function, -// except it ignores nonexistent files instead of returning an error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly the same functionality as the Load function, -// except it forces all section and key names to be lowercased. 
-func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly the same functionality as the Load function, -// except it allows having shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we're sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns a section by the given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } else if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes the named section exists and creates a new one when it does not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Sections returns a list of Sections. -func (f *File) Sections() []*Section { - sections := make([]*Section, len(f.sectionList)) - for i := range f.sectionList { - sections[i] = f.Section(f.sectionList[i]) - } - return sections -} - -// ChildSections returns a list of child sections of the given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns a list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) 
- delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -// WriteToIndent writes content into io.Writer with the given indentation. -// If PrettyFormat has been set to true, -// it will align "=" signs with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use a buffer to make sure the target is safe until encoding is finished. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err - } - } - - if i > 0 || DefaultHeader { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err = buf.WriteString(sec.rawBody); err != nil { - return 0, err - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err = buf.WriteString(kname); err != nil { - return 0, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { - return 0, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err - } - } - } - - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to the file system with the given value indentation. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because os.Create truncates the file, - // it's safer to save to a temporary file location and rename it after we're done. - tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) - if err != nil { - return err - } - - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) -} - -// SaveTo writes content to the file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index 838356a..0000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,699 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - Comment string -} - -// newKey simply returns a key object with the given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. -func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// transformValue takes a raw value and transforms it to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail fast if no indicator char is found for a recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return a modified result as the key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. 
-func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type value. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. 
-func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals[i]) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. 
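// The three list flavours above differ only in how they treat invalid
// entries. Sketch, assuming a hypothetical key "ports" holding "80, 443, oops":
ports := sec.Key("ports").Ints(",")             // [80 443 0]: bad entry kept as zero value
valid := sec.Key("ports").ValidInts(",")        // [80 443]: bad entry dropped
strict, err := sec.Key("ports").StrictInts(",") // nil plus an error at "oops"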
-func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. 
-func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 69d5476..0000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - "unicode" -) - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. -func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - return line[startIdx : pos+startIdx], nil - } - - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - - // Check continuation lines when desired - if !ignoreContinuation && line[len(line)-1] == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - i := strings.IndexAny(line, "#;") - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - } - - // Trim single quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } - return line, nil -} - -// parse parses data through an io.Reader. 
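// The readValue/readMultilines/readContinuationLines helpers above implement
// the three value syntaxes the parser accepts. A hypothetical input that
// exercises each of them:
//
//	text = """keeps the
//	embedded newline"""
//	cmd  = first \
//	       second
//	port = 8080 ; trailing comment, captured into Key.Comment
//
// Loading this yields text with a literal newline, cmd as "first second"
// (continuation lines are joined), and port as "8080".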
-func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - section, _ := f.NewSection(DEFAULT_SECTION) - - var line []byte - var inUnparseableSection bool - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care about the ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset auto-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) - if err != nil { - return err - } - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index 94f7375..0000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. 
- key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index eeb8dab..0000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return an error for failed parsing, -// because we want to use default value that is already assigned to struct. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. 
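// setWithProperType above defines MapTo's coercion rules; note that integer
// kinds try time.Duration first, so "30s" maps cleanly onto a Duration field.
// Sketch with a hypothetical struct and config:
type Server struct {
	Host    string        `ini:"host"`
	Port    int           `ini:"port"`
	Timeout time.Duration `ini:"timeout"`
}

func exampleMapTo() (Server, error) {
	var srv Server
	cfg, err := ini.Load([]byte("host = example.com\nport = 8080\ntimeout = 30s"))
	if err != nil {
		return srv, err
	}
	err = cfg.MapTo(&srv) // srv.Timeout == 30 * time.Second
	return srv, err
}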
-func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, false) -} - -// StrictMapTo maps section to given struct in strict mode, -// which returns all possible errors, including value parsing errors. -func (s *Section) StrictMapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// StrictMapTo maps file to given struct in strict mode, -// which returns all possible errors, including value parsing errors. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapToWithMapper maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible errors, including value parsing errors. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible errors, including value parsing errors. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite of setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite of setWithProperType. 
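// ReflectFrom and the reflect*WithProperType helpers in this file drive the
// reverse direction, struct -> INI. Sketch reusing the hypothetical Server
// type; ini.Empty and File.WriteTo come from elsewhere in this package, not
// from this hunk. Note there is no Duration special case on this path, so
// Timeout is written back as raw nanoseconds via field.Int():
func exampleReflectFrom() error {
	srv := Server{Host: "example.com", Port: 8080, Timeout: 30 * time.Second}
	out := ini.Empty()
	if err := ini.ReflectFrom(out, &srv); err != nil {
		return err
	}
	var buf bytes.Buffer
	_, err := out.WriteTo(&buf) // roughly: host = example.com, port = 8080, timeout = 30000000000
	return err
}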
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications for time.Time support. -// TODO: add more test coverage. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is that the section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as section. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects section from given struct. -func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFromWithMapper reflects data sources from given struct with name mapper. 
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b192..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651..0000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
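// Clone (above) and Merge (below) are the usual way to layer one message over
// another. Sketch, assuming a hypothetical generated type pb.Config with an
// optional Name field:
base := &pb.Config{Name: proto.String("base")}
dup := proto.Clone(base).(*pb.Config) // deep copy, safe to mutate
proto.Merge(dup, &pb.Config{Name: proto.String("override")})
// Scalar fields set in src replace dst's; repeated fields are appended.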
-func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index aa20729..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,970 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
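// A worked example for the varint decoders above: 300 is 0b1_0010_1100, so
// its low seven bits 0101100 plus a continuation bit encode as 0xAC, and the
// remaining bits 10 encode as 0x02.
x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
// x == 300, n == 2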
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
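[Editorial illustration] DecodeZigzag32 and DecodeZigzag64 above undo the zigzag mapping that keeps small negative sint32/sint64 values short on the wire: 0, -1, 1, -2, ... map to 0, 1, 2, 3, ... A standalone sketch of the 64-bit pair; the helper names are illustrative, not package code:

package main

import "fmt"

// zigzag64 interleaves negatives and positives so small magnitudes
// stay small; the arithmetic right shift smears the sign bit.
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

// unzigzag64 inverts the mapping, matching DecodeZigzag64 above.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 64, -64} {
		u := zigzag64(v)
		fmt.Printf("%d -> %d -> %d\n", v, u, unzigzag64(u))
	}
}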
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
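[Editorial illustration] skip above and unmarshalType below both peel fields off the wire the same way: every field starts with one varint key holding the tag in the high bits and the wire type in the low three. A standalone sketch of packing and unpacking that key; the constants mirror the package's WireVarint/WireBytes values, and the helper name is illustrative:

package main

import "fmt"

const (
	wireVarint = 0
	wireBytes  = 2
)

// fieldKey packs a field tag and wire type into the varint key
// that precedes every field: (tag << 3) | wiretype.
func fieldKey(tag, wireType int) uint64 {
	return uint64(tag)<<3 | uint64(wireType)
}

func main() {
	k := fieldKey(16, wireBytes)
	fmt.Println(k)           // 130
	fmt.Println(k>>3, k&0x7) // 16 2, recovered exactly as unmarshalType does
}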
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. 
-const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
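[Editorial illustration] The packed decoders in this stretch read a single varint byte count and then back-to-back values with no per-element keys, which is what makes the format compact. For reference, this is what a packed repeated varint field looks like on the wire; field number 4 and appendVarint are illustrative:

package main

import "fmt"

// appendVarint is the same base-128 encoding the Buffer uses.
func appendVarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

func main() {
	// Packed repeated field 4 holding 3, 270, 86942: one key, one
	// byte count, then the varints back to back.
	var payload []byte
	for _, v := range []uint64{3, 270, 86942} {
		payload = appendVarint(payload, v)
	}
	var buf []byte
	buf = appendVarint(buf, 4<<3|2) // wire type 2: length-delimited
	buf = appendVarint(buf, uint64(len(payload)))
	buf = append(buf, payload...)
	fmt.Printf("%% x\n", buf) // 22 06 03 8e 02 9e a7 05
}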
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 8b84d1b..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. 
-// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. 
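[Editorial illustration] EncodeRawBytes and EncodeStringBytes above implement the length-delimited wire form: a varint byte count followed by the raw payload. A standalone sketch framing one string field end to end; the field number and helper names are illustrative:

package main

import "fmt"

// appendVarint is the same base-128 encoding as Buffer.EncodeVarint.
func appendVarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// appendStringField frames s as field fieldNum with wire type 2:
// key varint, length varint, then the payload bytes themselves.
func appendStringField(buf []byte, fieldNum int, s string) []byte {
	buf = appendVarint(buf, uint64(fieldNum)<<3|2)
	buf = appendVarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	b := appendStringField(nil, 2, "testing")
	fmt.Printf("%% x\n", b) // 12 07 74 65 73 74 69 6e 67
}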
-type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
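[Editorial illustration] The comment in enc_new_map above is worth restating with concrete bytes: each map entry travels as an ordinary embedded message, key as field 1 and value as field 2, length-delimited under the map field's own tag. A standalone sketch for one map<uint64, string> entry; map field 3 and the helpers are illustrative:

package main

import "fmt"

func appendVarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// appendMapEntry builds the MapFieldEntry body (key field 1 as a
// varint, value field 2 as bytes), then frames that body under the
// map field's tag exactly like any embedded message.
func appendMapEntry(buf []byte, mapField int, k uint64, v string) []byte {
	var body []byte
	body = appendVarint(body, 1<<3|0) // field 1, wire type varint
	body = appendVarint(body, k)
	body = appendVarint(body, 2<<3|2) // field 2, wire type bytes
	body = appendVarint(body, uint64(len(v)))
	body = append(body, v...)

	buf = appendVarint(buf, uint64(mapField)<<3|2)
	buf = appendVarint(buf, uint64(len(body)))
	return append(buf, body...)
}

func main() {
	fmt.Printf("%% x\n", appendMapEntry(nil, 3, 7, "hi"))
	// 1a 06 08 07 12 02 68 69
}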
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf5..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
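[Editorial illustration] One caveat in the Equal contract above deserves emphasis: scalar comparison bottoms out in Go's ==, so a floating-point field holding NaN compares unequal to everything, including a byte-identical copy of its own message. A two-line reminder of the underlying Go semantics:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// Equal's float case uses ==, so a NaN-valued field never
	// matches, even against itself.
	fmt.Println(nan == nan) // false
}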
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index eaad218..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,587 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
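The Equal contract documented above is easiest to see in use. A brief sketch, assuming the generated pb.Test message shown in the lib.go package doc that is deleted later in this patch:

    a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
    b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
    fmt.Println(proto.Equal(a, b))   // true: same type, fields pairwise equal
    b.Reps = b.Reps[:2]
    fmt.Println(proto.Equal(a, b))   // false: repeated fields differ in length
    fmt.Println(proto.Equal(a, nil)) // false: a nil only equals another nil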
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. 
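extensionProperties above is a textbook double-checked cache: take the read lock for the fast path, then re-check under the write lock before building. Distilled into a generic sketch in plain Go with sync (cached, build, and the variable names are illustrative, not part of this package):

    var (
        propMu    sync.RWMutex
        propCache = map[string]int{}
    )

    // cached returns the memoized value for key, building it at most once
    // per key even under concurrent callers.
    func cached(key string, build func(string) int) int {
        propMu.RLock()
        v, ok := propCache[key]
        propMu.RUnlock()
        if ok {
            return v // fast path: read lock only
        }
        propMu.Lock()
        defer propMu.Unlock()
        if v, ok := propCache[key]; ok {
            return v // another goroutine built it between the two locks
        }
        v = build(key)
        propCache[key] = v
        return v
    }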
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
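Taken together, SetExtension, HasExtension, GetExtension, and ClearExtension above form the whole extension round trip. A sketch, assuming a hypothetical extendable message pb.MyMessage and a registered *string extension descriptor pb.E_Ext:

    m := &pb.MyMessage{}
    if err := proto.SetExtension(m, pb.E_Ext, proto.String("hi")); err != nil {
        log.Fatal(err) // wrong value type or field number out of range
    }
    if proto.HasExtension(m, pb.E_Ext) {
        v, err := proto.GetExtension(m, pb.E_Ext) // (interface{}, error)
        if err == nil {
            fmt.Println(*v.(*string)) // "hi"
        }
    }
    proto.ClearExtension(m, pb.E_Ext) // proto.HasExtension is false again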
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 1c22550..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,897 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. 
Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m 
*Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
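Two small sketches of the API above: reusing a Buffer (its EncodeVarint method lives in the encode path deleted earlier in this patch), and decoding an enum from either JSON form, using the FOO enum from the package doc:

    var b proto.Buffer
    _ = b.EncodeVarint(300)        // varint encoding of 300
    fmt.Printf("% x\n", b.Bytes()) // ac 02
    b.Reset()                      // ready to reuse for the next message

    val, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "example.FOO")
    // val == 17, err == nil; a numeric payload such as []byte("17") also works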
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
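SetDefaults above fills only fields that are both unset and carry a declared default. A sketch with the pb.Test message from the package doc, whose type field declares [default=77]:

    m := &pb.Test{Label: proto.String("x")} // Type left unset
    proto.SetDefaults(m)
    fmt.Println(*m.Type)  // 77: unset scalar filled from its declared default
    fmt.Println(*m.Label) // "x": already-set fields are left alone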
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982de..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index ec2289c..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876b..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
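-// The bytes are decoded as unknown fields (tag/wire-type pairs) and printed
-// between '<' and '>' via writeUnknownStruct.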
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
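- // For example, the bell byte 0x07 is written as \007 and 0xff as \377;
- // printable ASCII other than '"' and '\\' passes through unchanged.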
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
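- // Each element is written on its own line by writeExtension, e.g. a string
- // extension (hypothetical name) prints as:
- //   [my.pkg.my_ext]: "hello"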
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 5e14513..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,895 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - 
switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
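- // The name being matched comes from input of the form (hypothetical name):
- //   [my.pkg.my_ext]: 17
- // so each registered descriptor's full name is compared against extName.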
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
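- // For example, input like `id: 1 id: 2` for a non-repeated field "id"
- // (hypothetical) is rejected here as: non-repeated field "id" was repeated.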
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
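- // e.g. "2.5f" parses as 2.5, while "inf" and "-inf" keep their spelling
- // and are accepted by strconv.ParseFloat as infinities.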
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index b2af97f..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,139 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. 
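-// For example (sketch; pb.Foo is a hypothetical generated type):
-//   if ptypes.Is(a, &pb.Foo{}) { /* a wraps a pb.Foo */ }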
-func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { - return false - } - - return aname == proto.MessageName(pb) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 6c9a6cf..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. - // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. 
- // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 9bd3f50..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,139 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. - // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595d..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 65cb0f8..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). 
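-// For example, durpb.Duration{Seconds: 400 * 365 * 24 * 3600} passes the
-// validation below but cannot be converted by Duration: time.Duration
-// tops out near 292 years.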
-func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index b2410a0..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. 
-// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce4..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. 
For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100755 index b50a941..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=(any duration empty struct timestamp wrappers) - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd "$base" - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir - -for file in ${PROTO_FILES[@]}; do - echo 1>&2 "* $file" - protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die - cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file -done - -echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 47f10db..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,134 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. 
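The timestamp side mirrors this; a minimal round-trip sketch using the exported `TimestampProto` and `Timestamp` helpers documented above, plus the `TimestampString` formatter defined a few lines further down (again assuming the pre-removal vendored package):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp; errors outside the
	// validated range [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}

	// Timestamp -> time.Time, always returned in UTC.
	t, _ := ptypes.Timestamp(ts)

	fmt.Println(t.Unix())                   // 1484443815
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z
}
```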
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index e23e4a2..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
-// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index b7cbd17..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,133 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. 
The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/gucumber/gucumber/LICENSE.txt b/vendor/github.com/gucumber/gucumber/LICENSE.txt deleted file mode 100755 index 342cce5..0000000 --- a/vendor/github.com/gucumber/gucumber/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015 Loren Segal - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gucumber/gucumber/gherkin/i18n.go b/vendor/github.com/gucumber/gucumber/gherkin/i18n.go deleted file mode 100755 index 24c8b57..0000000 --- a/vendor/github.com/gucumber/gucumber/gherkin/i18n.go +++ /dev/null @@ -1,56 +0,0 @@ -package gherkin - -type Language string - -// Translation represents the Gherkin syntax keywords in a single language. -type Translation struct { - // Language specific term representing a feature. - Feature string - - // Language specific term representing the feature background. - Background string - - // Language specific term representing a scenario. - Scenario string - - // Language specific term representing a scenario outline. - Outline string - - // Language specific term representing the "And" step. 
- And string - - // Language specific term representing the "Given" step. - Given string - - // Language specific term representing the "When" step. - When string - - // Language specific term representing the "Then" step. - Then string - - // Language specific term representing a scenario outline prefix. - Examples string -} - -const ( - // The language code for English translations. - LANG_EN = Language("en") -) - -var ( - // Translations contains internationalized translations of the Gherkin - // syntax keywords in a variety of supported languages. - Translations = map[Language]Translation{ - LANG_EN: Translation{ - Feature: "Feature", - Background: "Background", - Scenario: "Scenario", - Outline: "Scenario Outline", - And: "And", - Given: "Given", - When: "When", - Then: "Then", - Examples: "Examples", - }, - } -) diff --git a/vendor/github.com/gucumber/gucumber/gherkin/parser.go b/vendor/github.com/gucumber/gucumber/gherkin/parser.go deleted file mode 100644 index f4fbf20..0000000 --- a/vendor/github.com/gucumber/gucumber/gherkin/parser.go +++ /dev/null @@ -1,353 +0,0 @@ -package gherkin - -import ( - "fmt" - "path/filepath" - "regexp" - "strings" -) - -var reNL = regexp.MustCompile(`\r?\n`) - -func ParseFilename(data, filename string) ([]Feature, error) { - lines := reNL.Split(data, -1) - p := parser{ - lines: lines, - features: []Feature{}, - translations: Translations[LANG_EN], - filename: filename, - } - if err := p.parse(); err != nil { - return nil, err - } - if len(p.features) == 0 { - return p.features, p.err("no features parsed") - } - return p.features, nil -} - -func Parse(data string) ([]Feature, error) { - return ParseFilename(data, "") -} - -type parser struct { - translations Translation - features []Feature - lines []string - lineNo int - lastLine int - started bool - filename string -} - -func (p parser) err(msg string, args ...interface{}) error { - l := p.lineNo + 1 - if l > len(p.lines) { - l = len(p.lines) - } - if p.filename == "" { - p.filename = ".feature" - } - return fmt.Errorf("parse error (%s:%d): %s.", - filepath.Base(p.filename), l, fmt.Sprintf(msg, args...)) -} - -func (p *parser) line() string { - if p.lineNo < len(p.lines) { - return p.lines[p.lineNo] - } - return "" -} - -func (p *parser) nextLine() bool { - p.lastLine = p.lineNo - if p.started { - p.lineNo++ - } - p.started = true - - for ; p.lineNo < len(p.lines); p.lineNo++ { - line, _ := p.lineStripped() - if line != "" && !strings.HasPrefix(line, "#") { - break - } - } - - return p.stillReading() -} - -func (p *parser) stillReading() bool { - return p.lineNo < len(p.lines) -} - -func (p *parser) unread() { - if p.lineNo == 0 { - p.started = false - } - p.lineNo = p.lastLine -} - -func (p *parser) parse() error { - for p.stillReading() { - if err := p.consumeFeature(); err != nil { - return err - } - } - - return nil -} - -func (p *parser) lineStripped() (string, int) { - line := p.line() - for i := 0; i < len(line); i++ { - c := line[i : i+1] - if c != " " && c != "\t" { - return line[i:], i - } - } - return line, 0 -} - -func (p *parser) consumeFeature() error { - desc := []string{} - tags, err := p.consumeTags() - if err != nil { - return err - } - - if !p.nextLine() { - if len(tags) > 0 { - return p.err("tags not applied to feature") - } - return nil - } - - line, startIndent := p.lineStripped() - parts := strings.SplitN(line, " ", 2) - prefix := p.translations.Feature + ":" - title := "" - - if parts[0] != prefix { - return p.err("expected %q, found %q", prefix, line) - } - - if len(parts) > 
1 { - title = parts[1] - } - f := Feature{ - Title: title, Tags: tags, Scenarios: []Scenario{}, - Filename: p.filename, Line: p.lineNo, - } - var stags *[]string - seenScenario, seenBackground := false, false - for p.stillReading() { // consume until we get to Background or Scenario - // find indentation of next line - if p.nextLine() { - _, indent := p.lineStripped() - p.unread() - if indent <= startIndent { // de-dented - break // done parsing this feature - } - } - - prevLine := p.lineNo - tags, err = p.consumeTags() - if err != nil { - return err - } - if p.lineNo != prevLine { // tags found - stags = &tags - } - - if !p.nextLine() { - break - } - - line, _ := p.lineStripped() - parts := strings.SplitN(line, ":", 2) - - switch parts[0] { - case p.translations.Background: - if seenScenario { - return p.err("illegal background after scenario") - } else if seenBackground { - return p.err("multiple backgrounds not allowed") - } - seenBackground = true - - b := Scenario{Filename: p.filename, Line: p.lineNo} - err = p.consumeScenario(&b) - if err != nil { - return err - } - if stags != nil { - b.Tags = *stags - } else { - b.Tags = []string{} - } - f.Background = b - stags = nil - case p.translations.Scenario, p.translations.Outline: - seenScenario = true - - s := Scenario{Filename: p.filename, Line: p.lineNo} - err = p.consumeScenario(&s) - if err != nil { - return err - } - if stags != nil { - s.Tags = *stags - } else { - s.Tags = []string{} - } - if len(parts) > 1 { - s.Title = strings.TrimSpace(parts[1]) - } - f.Scenarios = append(f.Scenarios, s) - stags = nil - default: // then this must be a description - if stags != nil { - return p.err("illegal description text after tags") - } else if seenScenario || seenBackground { - return p.err("illegal description text after scenario") - } - desc = append(desc, line) - } - } - f.Description = strings.Join(desc, "\n") - p.features = append(p.features, f) - - return nil -} - -func (p *parser) consumeScenario(scenario *Scenario) error { - scenario.Steps = []Step{} - _, startIndent := p.lineStripped() - for p.nextLine() { // consume all steps - _, indent := p.lineStripped() - if indent <= startIndent { // de-dented - p.unread() - break - } - - if err := p.consumeStep(scenario); err != nil { - return err - } - } - - return nil -} - -func (p *parser) consumeStep(scenario *Scenario) error { - line, indent := p.lineStripped() - parts := strings.SplitN(line, " ", 2) - - switch parts[0] { - case p.translations.Given, p.translations.Then, - p.translations.When, p.translations.And: - var arg StringData - if len(parts) < 2 { - return p.err("expected step text after %q", parts[0]) - } - if p.nextLine() { - l, _ := p.lineStripped() - p.unread() - if len(l) > 0 && (l[0] == '|' || l == `"""`) { - // this is step argument data - arg = p.consumeIndentedData(indent) - } - } - - var stype StepType - switch parts[0] { - case p.translations.Given: - stype = StepType("Given") - case p.translations.When: - stype = StepType("When") - case p.translations.Then: - stype = StepType("Then") - case p.translations.And: - stype = StepType("And") - } - s := Step{ - Filename: p.filename, Line: p.lineNo, - Type: stype, Text: parts[1], Argument: arg, - } - scenario.Steps = append(scenario.Steps, s) - case p.translations.Examples + ":": - scenario.Examples = p.consumeIndentedData(indent) - default: - return p.err("illegal step prefix %q", parts[0]) - } - return nil -} - -func (p *parser) consumeIndentedData(scenarioIndent int) StringData { - stringData := []string{} - startIndent, 
quoted := -1, false - for p.nextLine() { - var line string - var indent int - if startIndent == -1 { // first line - line, indent = p.lineStripped() - startIndent = indent - - if line == `"""` { - quoted = true // this is a docstring data block - continue // ignore this from data - } - } else { - line = p.line() - if len(line) <= startIndent { - // not enough text, not part of indented data - p.unread() - break - } - if line[0:startIndent] != strings.Repeat(" ", startIndent) { - // not enough indentation, not part of indented data - p.unread() - break - } - - line = line[startIndent:] - if !quoted && line[0] != '|' { - // tabular data must start with | on each line - p.unread() - break - } - if quoted && line == `"""` { // end quote on docstring block - break - } - - } - - stringData = append(stringData, line) - } - - return StringData(strings.Join(stringData, "\n")) -} - -func (p *parser) consumeTags() ([]string, error) { - tags := []string{} - if !p.nextLine() { - return tags, nil - } - - line, _ := p.lineStripped() - if len(p.lines) == 0 || !strings.HasPrefix(line, "@") { - p.unread() - return tags, nil - } - - for _, t := range strings.Split(line, " ") { - if t == "" { - continue - } - if t[0:1] != "@" { - return nil, p.err("invalid tag %q", t) - } - tags = append(tags, t) - } - - return tags, nil -} diff --git a/vendor/github.com/gucumber/gucumber/gherkin/types.go b/vendor/github.com/gucumber/gucumber/gherkin/types.go deleted file mode 100755 index 5d10e16..0000000 --- a/vendor/github.com/gucumber/gucumber/gherkin/types.go +++ /dev/null @@ -1,226 +0,0 @@ -package gherkin - -import ( - "reflect" - "strings" -) - -// Feature represents the top-most construct in a Gherkin document. A feature -// contains one or more scenarios, which in turn contains multiple steps. -type Feature struct { - // The filename where the feature was defined - Filename string - - // The line number where the feature was defined - Line int - - // The feature's title. - Title string - - // A longer description of the feature. This is not used during runtime. - Description string - - // Any tags associated with this feature. - Tags []string - - // Any background scenario data that is executed prior to scenarios. - Background Scenario - - // The scenarios associated with this feature. - Scenarios []Scenario - - // The longest line length in the feature (including title) - longestLine int -} - -// Scenario represents a scenario (or background) of a given feature. -type Scenario struct { - // The filename where the scenario was defined - Filename string - - // The line number where the scenario was defined - Line int - - // The scenario's title. For backgrounds, this is the empty string. - Title string - - // Any tags associated with this scenario. - Tags []string - - // All steps associated with the scenario. - Steps []Step - - // Contains all scenario outline example data, if provided. - Examples StringData - - // The longest line length in the scenario (including title) - longestLine int -} - -// Step represents an individual step making up a gucumber scenario. -type Step struct { - // The filename where the step was defined - Filename string - - // The line number where the step was defined - Line int - - // The step's "type" (Given, When, Then, And, ...) - // - // Note that this field is normalized to the English form (e.g., "Given"). - Type StepType - - // The text contained in the step (minus the "Type" prefix). - Text string - - // Argument represents multi-line argument data attached to a step. 
- Argument StringData -} - -// StringData is multi-line docstring text attached to a step. -type StringData string - -// TabularData is tabular text data attached to a step. -type TabularData [][]string - -// TabularDataMap is tabular text data attached to a step organized in map -// form of the header name and its associated row data. -type TabularDataMap map[string][]string - -// StepType represents a given step type. -type StepType string - -// ToTable turns StringData type into a TabularData type -func (s StringData) ToTable() TabularData { - var tabData TabularData - lines := strings.Split(string(s), "\n") - for _, line := range lines { - row := strings.Split(line, "|") - row = row[1 : len(row)-1] - for i, c := range row { - row[i] = strings.TrimSpace(c) - } - tabData = append(tabData, row) - } - return tabData -} - -// IsTabular returns whether the argument data is a table -func (s StringData) IsTabular() bool { - return len(s) > 0 && s[0] == '|' -} - -// ToMap converts a regular table to a map of header names to their row data. -// For example: -// -// t := TabularData{[]string{"header1", "header2"}, []string{"col1", "col2"}} -// t.ToMap() -// // Output: -// // map[string][]string{ -// // "header1": []string{"col1"}, -// // "header2": []string{"col2"}, -// // } -func (t TabularData) ToMap() TabularDataMap { - m := TabularDataMap{} - if len(t) > 1 { - for _, th := range t[0] { - m[th] = []string{} - } - for _, tr := range t[1:] { - for c, td := range tr { - m[t[0][c]] = append(m[t[0][c]], td) - } - } - } - return m -} - -// NumRows returns the number of rows in a table map -func (t TabularDataMap) NumRows() int { - if len(t) == 0 { - return 0 - } - return len(t[reflect.ValueOf(t).MapKeys()[0].String()]) -} - -// LongestLine returns the longest step line in a scenario. -func (s *Scenario) LongestLine() int { - if s.longestLine == 0 { - s.longestLine = len("Scenario: " + s.Title) - for _, step := range s.Steps { - if l := len(string(step.Type) + " " + step.Text); l > s.longestLine { - s.longestLine = l - } - } - } - return s.longestLine -} - -// LongestLine returns the longest step line in a feature. -func (f *Feature) LongestLine() int { - if f.longestLine == 0 { - f.longestLine = len("Feature: " + f.Title) - for _, s := range f.Scenarios { - if l := s.LongestLine(); l > f.longestLine { - f.longestLine = l - } - } - } - return f.longestLine -} - -// FilterMatched returns true if the set of input filters match the feature's tags. -func (f *Feature) FilterMatched(filters ...string) bool { - return matchTags(f.Tags, filters) -} - -// FilterMatched returns true if the set of input filters match the feature's tags. -func (s *Scenario) FilterMatched(f *Feature, filters ...string) bool { - t := []string{} - t = append(t, f.Tags...) - t = append(t, s.Tags...) 
- return matchTags(t, filters) -} - -func matchTags(tags []string, filters []string) bool { - if len(filters) == 0 { // no filters means everything passes - return true - } - for _, f := range filters { - if matchFilter(f, tags) { - return true // if any filter matches we succeed - } - } - return false -} - -func matchFilter(filter string, tags []string) bool { - parts := strings.Split(filter, ",") - for _, part := range parts { // all parts must match - part = strings.TrimSpace(part) - if part == "" { - continue - } - - if part[0] == '~' { // filter has to NOT match any tags - for _, t := range tags { - if part[1:] == string(t) { // tag matched, this should not happen - return false - } - } - // nothing matched, we can continue on - } else { - result := false - for _, t := range tags { - if part == string(t) { // found a match in a tag - result = true - break - } - } - if !result { // no matches, this filter failed - return false - } - } - } - return true -} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/errwrap/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md deleted file mode 100644 index 1c95f59..0000000 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# errwrap - -`errwrap` is a package for Go that formalizes the pattern of wrapping errors -and checking if an error contains another error. - -There is a common pattern in Go of taking a returned `error` value and -then wrapping it (such as with `fmt.Errorf`) before returning it. The problem -with this pattern is that you completely lose the original `error` structure. 
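-
-As a minimal illustration of that loss (this example is an editorial addition,
-not part of the original README, and assumes only the standard library), a
-`fmt.Errorf` wrap reduces the concrete `*os.PathError` to a plain string:
-
-```go
-package main
-
-import (
-	"fmt"
-	"os"
-)
-
-func open() error {
-	_, err := os.Open("/i/dont/exist")
-	if err != nil {
-		// The concrete *os.PathError is flattened into a message here;
-		// callers can no longer type-assert on it to inspect Op or Path.
-		return fmt.Errorf("failed to open: %s", err)
-	}
-	return nil
-}
-
-func main() {
-	if _, ok := open().(*os.PathError); !ok {
-		fmt.Println("the original error type is gone")
-	}
-}
-```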
-
-Arguably the _correct_ approach is that you should make a custom structure
-implementing the `error` interface, and have the original error as a field
-on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
-This is a good approach, but you have to know the entire chain of possible
-rewrappings, when you might care about only one of them.
-
-`errwrap` formalizes this pattern (it doesn't matter what approach you use
-above) by giving a single interface for wrapping errors, checking if a specific
-error is wrapped, and extracting that error.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/errwrap`.
-
-Full documentation is available at
-http://godoc.org/github.com/hashicorp/errwrap
-
-## Usage
-
-#### Basic Usage
-
-Below is a very basic example of its usage:
-
-```go
-// A function that always returns an error, but wraps it, like a real
-// function might.
-func tryOpen() error {
-	_, err := os.Open("/i/dont/exist")
-	if err != nil {
-		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
-	}
-
-	return nil
-}
-
-func main() {
-	err := tryOpen()
-
-	// We can use the Contains helpers to check if an error contains
-	// another error. It is safe to do this with a nil error, or with
-	// an error that doesn't even use the errwrap package.
-	if errwrap.Contains(err, "doesn't exist") {
-		// Do something
-	}
-	if errwrap.ContainsType(err, new(os.PathError)) {
-		// Do something
-	}
-
-	// Or we can use the associated `Get` functions to just extract
-	// a specific error. This would return nil if that specific error doesn't
-	// exist.
-	perr := errwrap.GetType(err, new(os.PathError))
-}
-```
-
-#### Custom Types
-
-If you're already making custom types that properly wrap errors, then
-you can get all the functionality of `errwrap.Contains` and such by
-implementing the `Wrapper` interface with just one function. Example:
-
-```go
-type AppError struct {
-	Code ErrorCode
-	Err  error
-}
-
-func (e *AppError) WrappedErrors() []error {
-	return []error{e.Err}
-}
-```
-
-Now this works:
-
-```go
-err := &AppError{Err: fmt.Errorf("an error")}
-if errwrap.ContainsType(err, fmt.Errorf("")) {
-	// This will work!
-}
-```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
deleted file mode 100644
index a733bef..0000000
--- a/vendor/github.com/hashicorp/errwrap/errwrap.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Package errwrap implements methods to formalize error wrapping in Go.
-//
-// All of the top-level functions that take an `error` are built to be able
-// to take any error, not just wrapped errors. This allows you to use errwrap
-// without having to type-check and type-cast everywhere.
-package errwrap
-
-import (
-	"errors"
-	"reflect"
-	"strings"
-)
-
-// WalkFunc is the callback called for Walk.
-type WalkFunc func(error)
-
-// Wrapper is an interface that can be implemented by custom types to
-// have all the Contains, Get, etc. functions in errwrap work.
-//
-// When Walk reaches a Wrapper, it will call the callback for every
-// wrapped error in addition to the wrapper itself. Since all the top-level
-// functions in errwrap use Walk, this means that all those functions work
-// with your custom type.
-type Wrapper interface {
-	WrappedErrors() []error
-}
-
-// Wrap defines that outer wraps inner, returning an error type that
-// can be cleanly used with the other methods in this package, such as
-// Contains, GetAll, etc.
-// -// This function won't modify the error message at all (the outer message -// will be used). -func Wrap(outer, inner error) error { - return &wrappedError{ - Outer: outer, - Inner: inner, - } -} - -// Wrapf wraps an error with a formatting message. This is similar to using -// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap -// errors, you should replace it with this. -// -// format is the format of the error message. The string '{{err}}' will -// be replaced with the original error message. -func Wrapf(format string, err error) error { - outerMsg := "" - if err != nil { - outerMsg = err.Error() - } - - outer := errors.New(strings.Replace( - format, "{{err}}", outerMsg, -1)) - - return Wrap(outer, err) -} - -// Contains checks if the given error contains an error with the -// message msg. If err is not a wrapped error, this will always return -// false unless the error itself happens to match this msg. -func Contains(err error, msg string) bool { - return len(GetAll(err, msg)) > 0 -} - -// ContainsType checks if the given error contains an error with -// the same concrete type as v. If err is not a wrapped error, this will -// check the err itself. -func ContainsType(err error, v interface{}) bool { - return len(GetAllType(err, v)) > 0 -} - -// Get is the same as GetAll but returns the deepest matching error. -func Get(err error, msg string) error { - es := GetAll(err, msg) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetType is the same as GetAllType but returns the deepest matching error. -func GetType(err error, v interface{}) error { - es := GetAllType(err, v) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetAll gets all the errors that might be wrapped in err with the -// given message. The order of the errors is such that the outermost -// matching error (the most recent wrap) is index zero, and so on. -func GetAll(err error, msg string) []error { - var result []error - - Walk(err, func(err error) { - if err.Error() == msg { - result = append(result, err) - } - }) - - return result -} - -// GetAllType gets all the errors that are the same type as v. -// -// The order of the return value is the same as described in GetAll. -func GetAllType(err error, v interface{}) []error { - var result []error - - var search string - if v != nil { - search = reflect.TypeOf(v).String() - } - Walk(err, func(err error) { - var needle string - if err != nil { - needle = reflect.TypeOf(err).String() - } - - if needle == search { - result = append(result, err) - } - }) - - return result -} - -// Walk walks all the wrapped errors in err and calls the callback. If -// err isn't a wrapped error, this will be called once for err. If err -// is a wrapped error, the callback will be called for both the wrapper -// that implements error as well as the wrapped error itself. -func Walk(err error, cb WalkFunc) { - if err == nil { - return - } - - switch e := err.(type) { - case *wrappedError: - cb(e.Outer) - Walk(e.Inner, cb) - case Wrapper: - cb(err) - - for _, err := range e.WrappedErrors() { - Walk(err, cb) - } - default: - cb(err) - } -} - -// wrappedError is an implementation of error that has both the -// outer and inner errors. 
-type wrappedError struct { - Outer error - Inner error -} - -func (w *wrappedError) Error() string { - return w.Outer.Error() -} - -func (w *wrappedError) WrappedErrors() []error { - return []error{w.Outer, w.Inner} -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. 
License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e531..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. 
So simply replacing
-`http.DefaultClient` with `&http.Client{}` is not enough.
-
-This repository provides some simple functions to get a "clean" `http.Client`
--- one that uses the same default values as the Go standard library, but
-returns a client that does not share any state with other clients.
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
deleted file mode 100644
index 7d8a57c..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package cleanhttp
-
-import (
-	"net"
-	"net/http"
-	"runtime"
-	"time"
-)
-
-// DefaultTransport returns a new http.Transport with similar default values to
-// http.DefaultTransport, but with idle connections and keepalives disabled.
-func DefaultTransport() *http.Transport {
-	transport := DefaultPooledTransport()
-	transport.DisableKeepAlives = true
-	transport.MaxIdleConnsPerHost = -1
-	return transport
-}
-
-// DefaultPooledTransport returns a new http.Transport with similar default
-// values to http.DefaultTransport. Do not use this for transient transports as
-// it can leak file descriptors over time. Only use this for transports that
-// will be re-used for the same host(s).
-func DefaultPooledTransport() *http.Transport {
-	transport := &http.Transport{
-		Proxy: http.ProxyFromEnvironment,
-		DialContext: (&net.Dialer{
-			Timeout:   30 * time.Second,
-			KeepAlive: 30 * time.Second,
-		}).DialContext,
-		MaxIdleConns:          100,
-		IdleConnTimeout:       90 * time.Second,
-		TLSHandshakeTimeout:   10 * time.Second,
-		ExpectContinueTimeout: 1 * time.Second,
-		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
-	}
-	return transport
-}
-
-// DefaultClient returns a new http.Client with similar default values to
-// http.Client, but with a non-shared Transport, idle connections disabled, and
-// keepalives disabled.
-func DefaultClient() *http.Client {
-	return &http.Client{
-		Transport: DefaultTransport(),
-	}
-}
-
-// DefaultPooledClient returns a new http.Client with similar default values to
-// http.Client, but with a shared Transport. Do not use this function for
-// transient clients as it can leak file descriptors over time. Only use this
-// for clients that will be re-used for the same host(s).
-func DefaultPooledClient() *http.Client {
-	return &http.Client{
-		Transport: DefaultPooledTransport(),
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
deleted file mode 100644
index 0584109..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package cleanhttp offers convenience utilities for acquiring "clean"
-// http.Transport and http.Client structs.
-//
-// Values set on http.DefaultClient and http.DefaultTransport affect all
-// callers. This can have detrimental effects, especially in TLS contexts,
-// where client or root certificates set to talk to multiple endpoints can end
-// up displacing each other, leading to hard-to-debug issues. This package
-// provides non-shared http.Client and http.Transport structs to ensure that
-// the configuration will not be overwritten by other parts of the application
-// or dependencies.
-//
-// The DefaultClient and DefaultTransport functions disable idle connections
-// and keepalives. Without ensuring that idle connections are closed before
-// garbage collection, short-term clients/transports can leak file descriptors,
-// eventually leading to "too many open files" errors.
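-//
-// (Illustrative aside, added editorially and not part of the original
-// package comment; the URL below is a placeholder.) A short-lived request
-// that shares no state with http.DefaultClient can be made with:
-//
-//	c := cleanhttp.DefaultClient()
-//	resp, err := c.Get("https://example.com/")
-//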
If you will be -// connecting to the same hosts repeatedly from the same client, you can use -// DefaultPooledClient to receive a client that has connection pooling -// semantics similar to http.DefaultClient. -// -package cleanhttp diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/go-getter/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. 
License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. 
Any law or regulation which provides that the language of a
-  contract shall be construed against the drafter shall not be used to construe
-  this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-  Mozilla Foundation is the license steward. Except as provided in Section
-  10.3, no one other than the license steward has the right to modify or
-  publish new versions of this License. Each version will be given a
-  distinguishing version number.
-
-10.2. Effect of New Versions
-
-  You may distribute the Covered Software under the terms of the version of
-  the License under which You originally received the Covered Software, or
-  under the terms of any subsequent version published by the license
-  steward.
-
-10.3. Modified Versions
-
-  If you create software not governed by this License, and you want to
-  create a new license for such software, you may create and use a modified
-  version of this License if you rename the license and remove any
-  references to the name of the license steward (except to note that such
-  modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-  If You choose to distribute Source Code Form that is Incompatible With
-  Secondary Licenses under the terms of this version of the License, the
-  notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
deleted file mode 100644
index e651a34..0000000
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ /dev/null
@@ -1,257 +0,0 @@
-# go-getter
-
-[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
-[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
-[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
-
-[travis]: http://travis-ci.org/hashicorp/go-getter
-[godocs]: http://godoc.org/github.com/hashicorp/go-getter
-[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
-
-go-getter is a library for Go (golang) for downloading files or directories
-from various sources using a URL as the primary form of input.
-
-The power of this library is its flexibility: it can download from a number
-of different sources (file paths, Git, HTTP, Mercurial, etc.)
-using a single string as input. This relieves the implementer of the burden
-of knowing how to download from a variety of sources.
-
-The concept of a _detector_ automatically turns invalid URLs into proper
-URLs. For example: "github.com/hashicorp/go-getter" would turn into a
-Git URL. Or "./foo" would turn into a file URL.
These are extensible.
-
-This library is used by [Terraform](https://terraform.io) for
-downloading modules, [Otto](https://ottoproject.io) for dependencies and
-Appfile imports, and [Nomad](https://nomadproject.io) for downloading
-binaries.
-
-## Installation and Usage
-
-Package documentation can be found on
-[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
-
-Installation can be done with a normal `go get`:
-
-```
-$ go get github.com/hashicorp/go-getter
-```
-
-go-getter also has a command you can use to test URL strings:
-
-```
-$ go install github.com/hashicorp/go-getter/cmd/go-getter
-...
-
-$ go-getter github.com/foo/bar ./foo
-...
-```
-
-The command is useful for verifying URL structures.
-
-## URL Format
-
-go-getter uses a single string URL as input to download from a variety of
-protocols. go-getter has various "tricks" with this URL to do certain things.
-This section documents the URL format.
-
-### Supported Protocols and Detectors
-
-**Protocols** are used to download files/directories using a specific
-mechanism. Example protocols are Git and HTTP.
-
-**Detectors** are used to transform a valid or invalid URL into another
-URL if it matches a certain pattern. Example: "github.com/user/repo" is
-automatically transformed into a fully valid Git URL. This allows go-getter
-to be very user-friendly.
-
-go-getter out of the box supports the following protocols. Support for
-additional protocols can be added at runtime by implementing the `Getter`
-interface.
-
-  * Local files
-  * Git
-  * Mercurial
-  * HTTP
-  * Amazon S3
-
-In addition to the above protocols, go-getter has what are called "detectors."
-These take a URL and attempt to automatically choose the best protocol for
-it, which might even involve changing the protocol. The following detection
-is built-in by default:
-
-  * File paths such as "./foo" are automatically changed to absolute
-    file URLs.
-  * GitHub URLs, such as "github.com/mitchellh/vagrant", are automatically
-    changed to Git protocol over HTTP.
-  * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant", are automatically
-    changed to a Git or Mercurial protocol using the BitBucket API.
-
-### Forced Protocol
-
-In some cases, the protocol to use is ambiguous depending on the source
-URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
-an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
-URL.
-
-A protocol is forced by prefixing the URL with the protocol name followed
-by a double colon. For example: `git::http://github.com/mitchellh/vagrant.git`
-would download the given HTTP URL using the Git protocol.
-
-Forced protocols will also override any detectors.
-
-In the absence of a forced protocol, detectors may be run on the URL, transforming
-the protocol anyway. The above example would've used the Git protocol either
-way since the Git detector would've detected it was a GitHub URL.
-
-### Protocol-Specific Options
-
-Each protocol can support protocol-specific options to configure that
-protocol. For example, the `git` protocol supports specifying a `ref`
-query parameter that tells it what ref to check out for that Git
-repository.
-
-The options are specified as query parameters on the URL (or URL-like string)
-given to go-getter. Using the Git example above, the URL below is a valid
-input to go-getter:
-
-    github.com/hashicorp/go-getter?ref=abcd1234
-
-The protocol-specific options are documented below the URL format
-section.
But because they are part of the URL, we point them out here so
-you know they exist.
-
-### Checksumming
-
-For file downloads of any protocol, go-getter can automatically verify
-a checksum for you. Note that checksumming only works for downloading files,
-not directories, but checksumming will work for any protocol.
-
-To checksum a file, append a `checksum` query parameter to the URL.
-The parameter value should be in the format of `type:value`, where
-type is "md5", "sha1", "sha256", or "sha512". The "value" should be
-the actual checksum value. go-getter will parse out this query parameter
-automatically and use it to verify the checksum. An example URL
-is shown below:
-
-```
-./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
-```
-
-The checksum query parameter is never sent to the backend protocol
-implementation. It is used at a higher level by go-getter itself.
-
-### Unarchiving
-
-go-getter will automatically unarchive files into a file or directory
-based on the extension of the file being requested (over any protocol).
-This works for both file and directory downloads.
-
-go-getter looks for an `archive` query parameter to specify the format of
-the archive. If this isn't specified, go-getter will use the extension of
-the path to see if it appears archived. Unarchiving can be explicitly
-disabled by setting the `archive` query parameter to `false`.
-
-The following archive formats are supported:
-
-  * `tar.gz` and `tgz`
-  * `tar.bz2` and `tbz2`
-  * `zip`
-  * `gz`
-  * `bz2`
-
-An example URL is shown below:
-
-```
-./foo.zip
-```
-
-This will automatically be inferred to be a ZIP file and will be extracted.
-You can also be explicit about the archive type:
-
-```
-./some/other/path?archive=zip
-```
-
-And finally, you can disable archiving completely:
-
-```
-./some/path?archive=false
-```
-
-You can combine unarchiving with the other features of go-getter such
-as checksumming. The special `archive` query parameter will be removed
-from the URL before going to the final protocol downloader.
-
-## Protocol-Specific Options
-
-This section documents the protocol-specific options that can be specified
-for go-getter. These options should be appended to the input as normal query
-parameters. Depending on the usage of go-getter, applications may provide
-alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
-provides a nice options block for specifying options rather than in the URL.
-
-## General (All Protocols)
-
-The options below are available to all protocols:
-
-  * `archive` - The archive format to use to unarchive this file, or "" (empty
-    string) to disable unarchiving. For more details, see the complete section
-    on archive support above.
-
-  * `checksum` - Checksum to verify the downloaded file or archive. See
-    the entire section on checksumming above for format and more details.
-
-### Local Files (`file`)
-
-None
-
-### Git (`git`)
-
-  * `ref` - The Git ref to check out. This is a ref, so it can point to
-    a commit SHA, a branch name, etc. If it is a named ref such as a branch
-    name, go-getter will update it to the latest on each get.
-
-  * `sshkey` - An SSH private key to use during clones. The provided key must
-    be a base64-encoded string. For example, to generate a suitable `sshkey`
-    from a private key file on disk, you would run `base64 -w0 <path to private key>`.
-
-    **Note**: Git 2.3+ is required to use this feature.
-
-### Mercurial (`hg`)
-
-  * `rev` - The Mercurial revision to check out.
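-
-Before moving on to the HTTP and S3 options, here is a minimal sketch of
-using these URL options programmatically. This example is an editorial
-addition, not part of the original README; it relies only on the `Client`
-type and `Get` method shown in `client.go` below, and the repository URL,
-ref, and destination path are placeholders:
-
-```go
-package main
-
-import (
-	"log"
-
-	getter "github.com/hashicorp/go-getter"
-)
-
-func main() {
-	// Force the git protocol, pin a ref with the query string, and
-	// fetch the repository as a directory.
-	client := &getter.Client{
-		Src:  "git::https://github.com/hashicorp/go-getter.git?ref=abcd1234",
-		Dst:  "./go-getter-src",
-		Pwd:  ".",
-		Mode: getter.ClientModeDir,
-	}
-	if err := client.Get(); err != nil {
-		log.Fatal(err)
-	}
-}
-```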
-
-### HTTP (`http`)
-
-#### Basic Authentication
-
-To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
-hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
-characters, including the username and password, must be URL encoded.
-
-### S3 (`s3`)
-
-S3 takes various access configurations in the URL. Note that it will also
-read these from standard AWS environment variables if they're set. If
-the query parameters are present, these take priority.
-
-  * `aws_access_key_id` - AWS access key.
-  * `aws_access_key_secret` - AWS access key secret.
-  * `aws_access_token` - AWS access token if this is being used.
-
-#### Using IAM Instance Profiles with S3
-
-If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
-using credentials, then just omit these, and the profile, if available,
-will be used automatically.
-
-#### S3 Bucket Examples
-
-S3 has several addressing schemes used to reference your bucket. These are
-listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
-
-Some examples for these addressing schemes:
-- s3::https://s3.amazonaws.com/bucket/foo
-- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
-- bucket.s3.amazonaws.com/foo
-- bucket.s3-eu-west-1.amazonaws.com/foo/bar
-
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
deleted file mode 100644
index 159dad4..0000000
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-version: "build-{branch}-{build}"
-image: Visual Studio 2015
-clone_folder: c:\gopath\github.com\hashicorp\go-getter
-environment:
-  GOPATH: c:\gopath
-install:
-- cmd: >-
-    echo %Path%
-
-    go version
-
-    go env
-
-    go get -d -v -t ./...
-build_script:
-- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
deleted file mode 100644
index 876812a..0000000
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package getter
-
-import (
-	"bytes"
-	"crypto/md5"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/sha512"
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	urlhelper "github.com/hashicorp/go-getter/helper/url"
-)
-
-// Client is a client for downloading things.
-//
-// Top-level functions such as Get are shortcuts for interacting with a client.
-// Using a client directly allows more fine-grained control over how downloading
-// is done, as well as customizing the protocols supported.
-type Client struct {
-	// Src is the source URL to get.
-	//
-	// Dst is the path to save the downloaded thing as. If Dir is set to
-	// true, then this should be a directory. If the directory doesn't exist,
-	// it will be created for you.
-	//
-	// Pwd is the working directory for detection. If this isn't set, some
-	// detection may fail. Client will not default pwd to the current
-	// working directory for security reasons.
-	Src string
-	Dst string
-	Pwd string
-
-	// Mode is the method of download the client will use. See ClientMode
-	// for documentation.
-	Mode ClientMode
-
-	// Detectors is the list of detectors that are tried on the source.
-	// If this is nil, then the default Detectors will be used.
-	Detectors []Detector
-
-	// Decompressors is the map of decompressors supported by this client.
- // If this is nil, then the default value is the Decompressors global. - Decompressors map[string]Decompressor - - // Getters is the map of protocols supported by this client. If this - // is nil, then the default Getters variable will be used. - Getters map[string]Getter - - // Dir, if true, tells the Client it is downloading a directory (versus - // a single file). This distinction is necessary since filenames and - // directory names follow the same format so disambiguating is impossible - // without knowing ahead of time. - // - // WARNING: deprecated. If Mode is set, that will take precedence. - Dir bool -} - -// Get downloads the configured source to the destination. -func (c *Client) Get() error { - // Store this locally since there are cases we swap this - mode := c.Mode - if mode == ClientModeInvalid { - if c.Dir { - mode = ClientModeDir - } else { - mode = ClientModeFile - } - } - - // Default decompressor value - decompressors := c.Decompressors - if decompressors == nil { - decompressors = Decompressors - } - - // Detect the URL. This is safe if it is already detected. - detectors := c.Detectors - if detectors == nil { - detectors = Detectors - } - src, err := Detect(c.Src, c.Pwd, detectors) - if err != nil { - return err - } - - // Determine if we have a forced protocol, i.e. "git::http://..." - force, src := getForcedGetter(src) - - // If there is a subdir component, then we download the root separately - // and then copy over the proper subdir. - var realDst string - dst := c.Dst - src, subDir := SourceDirSubdir(src) - if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - realDst = dst - dst = tmpDir - } - - u, err := urlhelper.Parse(src) - if err != nil { - return err - } - if force == "" { - force = u.Scheme - } - - getters := c.Getters - if getters == nil { - getters = Getters - } - - g, ok := getters[force] - if !ok { - return fmt.Errorf( - "download not supported for scheme '%s'", force) - } - - // We have magic query parameters that we use to signal different features - q := u.Query() - - // Determine if we have an archive type - archiveV := q.Get("archive") - if archiveV != "" { - // Delete the paramter since it is a magic parameter we don't - // want to pass on to the Getter - q.Del("archive") - u.RawQuery = q.Encode() - - // If we can parse the value as a bool and it is false, then - // set the archive to "-" which should never map to a decompressor - if b, err := strconv.ParseBool(archiveV); err == nil && !b { - archiveV = "-" - } - } - if archiveV == "" { - // We don't appear to... but is it part of the filename? - matchingLen := 0 - for k, _ := range decompressors { - if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { - archiveV = k - matchingLen = len(k) - } - } - } - - // If we have a decompressor, then we need to change the destination - // to download to a temporary path. We unarchive this into the final, - // real path. - var decompressDst string - var decompressDir bool - decompressor := decompressors[archiveV] - if decompressor != nil { - // Create a temporary directory to store our archive. We delete - // this at the end of everything. - td, err := ioutil.TempDir("", "getter") - if err != nil { - return fmt.Errorf( - "Error creating temporary directory for archive: %s", err) - } - defer os.RemoveAll(td) - - // Swap the download directory to be our temporary path and - // store the old values. 
- decompressDst = dst - decompressDir = mode != ClientModeFile - dst = filepath.Join(td, "archive") - mode = ClientModeFile - } - - // Determine if we have a checksum - var checksumHash hash.Hash - var checksumValue []byte - if v := q.Get("checksum"); v != "" { - // Delete the query parameter if we have it. - q.Del("checksum") - u.RawQuery = q.Encode() - - // Determine the checksum hash type - checksumType := "" - idx := strings.Index(v, ":") - if idx > -1 { - checksumType = v[:idx] - } - switch checksumType { - case "md5": - checksumHash = md5.New() - case "sha1": - checksumHash = sha1.New() - case "sha256": - checksumHash = sha256.New() - case "sha512": - checksumHash = sha512.New() - default: - return fmt.Errorf( - "unsupported checksum type: %s", checksumType) - } - - // Get the remainder of the value and parse it into bytes - b, err := hex.DecodeString(v[idx+1:]) - if err != nil { - return fmt.Errorf("invalid checksum: %s", err) - } - - // Set our value - checksumValue = b - } - - if mode == ClientModeAny { - // Ask the getter which client mode to use - mode, err = g.ClientMode(u) - if err != nil { - return err - } - - // Destination is the base name of the URL path in "any" mode when - // a file source is detected. - if mode == ClientModeFile { - dst = filepath.Join(dst, filepath.Base(u.Path)) - } - } - - // If we're not downloading a directory, then just download the file - // and return. - if mode == ClientModeFile { - err := g.GetFile(dst, u) - if err != nil { - return err - } - - if checksumHash != nil { - if err := checksum(dst, checksumHash, checksumValue); err != nil { - return err - } - } - - if decompressor != nil { - // We have a decompressor, so decompress the current destination - // into the final destination with the proper mode. - err := decompressor.Decompress(decompressDst, dst, decompressDir) - if err != nil { - return err - } - - // Swap the information back - dst = decompressDst - if decompressDir { - mode = ClientModeAny - } else { - mode = ClientModeFile - } - } - - // We check the dir value again because it can be switched back - // if we were unarchiving. If we're still only Get-ing a file, then - // we're done. - if mode == ClientModeFile { - return nil - } - } - - // If we're at this point we're either downloading a directory or we've - // downloaded and unarchived a directory and we're just checking subdir. - // In the case we have a decompressor we don't Get because it was Get - // above. - if decompressor == nil { - // If we're getting a directory, then this is an error. You cannot - // checksum a directory. TODO: test - if checksumHash != nil { - return fmt.Errorf( - "checksum cannot be specified for directory download") - } - - // We're downloading a directory, which might require a bit more work - // if we're specifying a subdir. - err := g.Get(dst, u) - if err != nil { - err = fmt.Errorf("error downloading '%s': %s", src, err) - return err - } - } - - // If we have a subdir, copy that over - if subDir != "" { - if err := os.RemoveAll(realDst); err != nil { - return err - } - if err := os.MkdirAll(realDst, 0755); err != nil { - return err - } - - return copyDir(realDst, filepath.Join(dst, subDir), false) - } - - return nil -} - -// checksum is a simple method to compute the checksum of a source file -// and compare it to the given expected value. 
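As a complement to the Get flow just walked through, a hedged sketch of driving the Client directly rather than through the package-level helpers (source and paths are illustrative; field names are as defined above):

```
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	client := &getter.Client{
		Src:  "git::https://example.com/repo.git//subdir?ref=v1.0.0",
		Dst:  "/tmp/out",
		Pwd:  "/tmp/work", // used by detectors for relative sources
		Mode: getter.ClientModeDir,
	}
	if err := client.Get(); err != nil {
		log.Fatal(err)
	}
}
```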
-func checksum(source string, h hash.Hash, v []byte) error { - f, err := os.Open(source) - if err != nil { - return fmt.Errorf("Failed to open file for checksum: %s", err) - } - defer f.Close() - - if _, err := io.Copy(h, f); err != nil { - return fmt.Errorf("Failed to hash: %s", err) - } - - if actual := h.Sum(nil); !bytes.Equal(actual, v) { - return fmt.Errorf( - "Checksums did not match.\nExpected: %s\nGot: %s", - hex.EncodeToString(v), - hex.EncodeToString(actual)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go deleted file mode 100644 index 7f02509..0000000 --- a/vendor/github.com/hashicorp/go-getter/client_mode.go +++ /dev/null @@ -1,24 +0,0 @@ -package getter - -// ClientMode is the mode that the client operates in. -type ClientMode uint - -const ( - ClientModeInvalid ClientMode = iota - - // ClientModeAny downloads anything it can. In this mode, dst must - // be a directory. If src is a file, it is saved into the directory - // with the basename of the URL. If src is a directory or archive, - // it is unpacked directly into dst. - ClientModeAny - - // ClientModeFile downloads a single file. In this mode, dst must - // be a file path (doesn't have to exist). src must point to a single - // file. It is saved as dst. - ClientModeFile - - // ClientModeDir downloads a directory. In this mode, dst must be - // a directory path (doesn't have to exist). src must point to an - // archive or directory (such as in s3). - ClientModeDir -) diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go deleted file mode 100644 index 2f58e8a..0000000 --- a/vendor/github.com/hashicorp/go-getter/copy_dir.go +++ /dev/null @@ -1,78 +0,0 @@ -package getter - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -// -// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. -func copyDir(dst string, src string, ignoreDot bool) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == src { - return nil - } - - if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If we have a file, copy the contents. 
- srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go deleted file mode 100644 index d18174c..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ /dev/null @@ -1,29 +0,0 @@ -package getter - -// Decompressor defines the interface that must be implemented to add -// support for decompressing a type. -type Decompressor interface { - // Decompress should decompress src to dst. dir specifies whether dst - // is a directory or single file. src is guaranteed to be a single file - // that exists. dst is not guaranteed to exist already. - Decompress(dst, src string, dir bool) error -} - -// Decompressors is the mapping of extension to the Decompressor implementation -// that will decompress that extension/type. -var Decompressors map[string]Decompressor - -func init() { - tbzDecompressor := new(TarBzip2Decompressor) - tgzDecompressor := new(TarGzipDecompressor) - - Decompressors = map[string]Decompressor{ - "bz2": new(Bzip2Decompressor), - "gz": new(GzipDecompressor), - "tar.bz2": tbzDecompressor, - "tar.gz": tgzDecompressor, - "tbz2": tbzDecompressor, - "tgz": tgzDecompressor, - "zip": new(ZipDecompressor), - } -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go deleted file mode 100644 index 339f4cf..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go +++ /dev/null @@ -1,45 +0,0 @@ -package getter - -import ( - "compress/bzip2" - "fmt" - "io" - "os" - "path/filepath" -) - -// Bzip2Decompressor is an implementation of Decompressor that can -// decompress bz2 files. -type Bzip2Decompressor struct{} - -func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { - // Directory isn't supported at all - if dir { - return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Bzip2 compression is second - bzipR := bzip2.NewReader(f) - - // Copy it out - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, bzipR) - return err -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go deleted file mode 100644 index 2001054..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ /dev/null @@ -1,49 +0,0 @@ -package getter - -import ( - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" -) - -// GzipDecompressor is an implementation of Decompressor that can -// decompress bz2 files. 
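Because the `Decompressors` registry above is an exported map keyed by extension, callers can register additional formats. A sketch under that assumption; `XzDecompressor` is hypothetical and shown only to illustrate the extension point:

```
package main

import getter "github.com/hashicorp/go-getter"

// XzDecompressor is hypothetical; any type satisfying the Decompressor
// interface above can be registered.
type XzDecompressor struct{}

func (d *XzDecompressor) Decompress(dst, src string, dir bool) error {
	// ... decompress the single file src into dst here ...
	return nil
}

func init() {
	// Register under the extension go-getter should match on, e.g. foo.xz.
	getter.Decompressors["xz"] = new(XzDecompressor)
}
```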
-type GzipDecompressor struct{} - -func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { - // Directory isn't supported at all - if dir { - return fmt.Errorf("gzip-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // gzip compression is second - gzipR, err := gzip.NewReader(f) - if err != nil { - return err - } - defer gzipR.Close() - - // Copy it out - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, gzipR) - return err -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go deleted file mode 100644 index 61f6043..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tar.go +++ /dev/null @@ -1,83 +0,0 @@ -package getter - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" -) - -// untar is a shared helper for untarring an archive. The reader should provide -// an uncompressed view of the tar archive. -func untar(input io.Reader, dst, src string, dir bool) error { - tarR := tar.NewReader(input) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } else { - // There is no ordering guarantee that a file in a directory is - // listed before the directory - dstPath := filepath.Dir(path) - - // Check that the directory exists, otherwise create it - if _, err := os.Stat(dstPath); os.IsNotExist(err) { - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - } - } - - // We have a file. If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go deleted file mode 100644 index 5391b5c..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ /dev/null @@ -1,33 +0,0 @@ -package getter - -import ( - "compress/bzip2" - "os" - "path/filepath" -) - -// TarBzip2Decompressor is an implementation of Decompressor that can -// decompress tar.bz2 files. 
-type TarBzip2Decompressor struct{} - -func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, 0755); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Bzip2 compression is second - bzipR := bzip2.NewReader(f) - return untar(bzipR, dst, src, dir) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go deleted file mode 100644 index 82b8ab4..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ /dev/null @@ -1,135 +0,0 @@ -package getter - -import ( - "crypto/md5" - "encoding/hex" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "sort" - "strings" - - "github.com/mitchellh/go-testing-interface" -) - -// TestDecompressCase is a single test case for testing decompressors -type TestDecompressCase struct { - Input string // Input is the complete path to the input file - Dir bool // Dir is whether or not we're testing directory mode - Err bool // Err is whether we expect an error or not - DirList []string // DirList is the list of files for Dir mode - FileMD5 string // FileMD5 is the expected MD5 for a single file -} - -// TestDecompressor is a helper function for testing generic decompressors. -func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { - for _, tc := range cases { - t.Logf("Testing: %s", tc.Input) - - // Temporary dir to store stuff - td, err := ioutil.TempDir("", "getter") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Destination is always joining result so that we have a new path - dst := filepath.Join(td, "subdir", "result") - - // We use a function so defers work - func() { - defer os.RemoveAll(td) - - // Decompress - err := d.Decompress(dst, tc.Input, tc.Dir) - if (err != nil) != tc.Err { - t.Fatalf("err %s: %s", tc.Input, err) - } - if tc.Err { - return - } - - // If it isn't a directory, then check for a single file - if !tc.Dir { - fi, err := os.Stat(dst) - if err != nil { - t.Fatalf("err %s: %s", tc.Input, err) - } - if fi.IsDir() { - t.Fatalf("err %s: expected file, got directory", tc.Input) - } - if tc.FileMD5 != "" { - actual := testMD5(t, dst) - expected := tc.FileMD5 - if actual != expected { - t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual) - } - } - - return - } - - // Convert expected for windows - expected := tc.DirList - if runtime.GOOS == "windows" { - for i, v := range expected { - expected[i] = strings.Replace(v, "/", "\\", -1) - } - } - - // Directory, check for the correct contents - actual := testListDir(t, dst) - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) - } - }() - } -} - -func testListDir(t testing.T, path string) []string { - var result []string - err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - sub = strings.TrimPrefix(sub, path) - if sub == "" { - return nil - } - sub = sub[1:] // Trim the leading path sep. 
- - // If it is a dir, add trailing sep - if info.IsDir() { - sub += string(os.PathSeparator) - } - - result = append(result, sub) - return nil - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - sort.Strings(result) - return result -} - -func testMD5(t testing.T, path string) string { - f, err := os.Open(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - h := md5.New() - _, err = io.Copy(h, f) - if err != nil { - t.Fatalf("err: %s", err) - } - - result := h.Sum(nil) - return hex.EncodeToString(result) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go deleted file mode 100644 index 65eb70d..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ /dev/null @@ -1,39 +0,0 @@ -package getter - -import ( - "compress/gzip" - "fmt" - "os" - "path/filepath" -) - -// TarGzipDecompressor is an implementation of Decompressor that can -// decompress tar.gzip files. -type TarGzipDecompressor struct{} - -func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, 0755); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Gzip compression is second - gzipR, err := gzip.NewReader(f) - if err != nil { - return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err) - } - defer gzipR.Close() - - return untar(gzipR, dst, src, dir) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go deleted file mode 100644 index a065c07..0000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ /dev/null @@ -1,96 +0,0 @@ -package getter - -import ( - "archive/zip" - "fmt" - "io" - "os" - "path/filepath" -) - -// ZipDecompressor is an implementation of Decompressor that can -// decompress tar.gzip files. -type ZipDecompressor struct{} - -func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, 0755); err != nil { - return err - } - - // Open the zip - zipR, err := zip.OpenReader(src) - if err != nil { - return err - } - defer zipR.Close() - - // Check the zip integrity - if len(zipR.File) == 0 { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - if !dir && len(zipR.File) > 1 { - return fmt.Errorf("expected a single file: %s", src) - } - - // Go through and unarchive - for _, f := range zipR.File { - path := dst - if dir { - path = filepath.Join(path, f.Name) - } - - if f.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // Create the enclosing directories if we must. ZIP files aren't - // required to contain entries for just the directories so this - // can happen. 
- if dir { - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - } - - // Open the file for reading - srcF, err := f.Open() - if err != nil { - return err - } - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - srcF.Close() - return err - } - _, err = io.Copy(dstF, srcF) - srcF.Close() - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, f.Mode()); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go deleted file mode 100644 index 481b737..0000000 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ /dev/null @@ -1,97 +0,0 @@ -package getter - -import ( - "fmt" - "path/filepath" - - "github.com/hashicorp/go-getter/helper/url" -) - -// Detector defines the interface that an invalid URL or a URL with a blank -// scheme is passed through in order to determine if its shorthand for -// something else well-known. -type Detector interface { - // Detect will detect whether the string matches a known pattern to - // turn it into a proper URL. - Detect(string, string) (string, bool, error) -} - -// Detectors is the list of detectors that are tried on an invalid URL. -// This is also the order they're tried (index 0 is first). -var Detectors []Detector - -func init() { - Detectors = []Detector{ - new(GitHubDetector), - new(BitBucketDetector), - new(S3Detector), - new(FileDetector), - } -} - -// Detect turns a source string into another source string if it is -// detected to be of a known pattern. -// -// The third parameter should be the list of detectors to use in the -// order to try them. If you don't want to configure this, just use -// the global Detectors variable. -// -// This is safe to be called with an already valid source string: Detect -// will just return it. -func Detect(src string, pwd string, ds []Detector) (string, error) { - getForce, getSrc := getForcedGetter(src) - - // Separate out the subdir if there is one, we don't pass that to detect - getSrc, subDir := SourceDirSubdir(getSrc) - - u, err := url.Parse(getSrc) - if err == nil && u.Scheme != "" { - // Valid URL - return src, nil - } - - for _, d := range ds { - result, ok, err := d.Detect(getSrc, pwd) - if err != nil { - return "", err - } - if !ok { - continue - } - - var detectForce string - detectForce, result = getForcedGetter(result) - result, detectSubdir := SourceDirSubdir(result) - - // If we have a subdir from the detection, then prepend it to our - // requested subdir. - if detectSubdir != "" { - if subDir != "" { - subDir = filepath.Join(detectSubdir, subDir) - } else { - subDir = detectSubdir - } - } - if subDir != "" { - u, err := url.Parse(result) - if err != nil { - return "", fmt.Errorf("Error parsing URL: %s", err) - } - u.Path += "//" + subDir - result = u.String() - } - - // Preserve the forced getter if it exists. We try to use the - // original set force first, followed by any force set by the - // detector. 
- if getForce != "" { - result = fmt.Sprintf("%s::%s", getForce, result) - } else if detectForce != "" { - result = fmt.Sprintf("%s::%s", detectForce, result) - } - - return result, nil - } - - return "", fmt.Errorf("invalid source string: %s", src) -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go deleted file mode 100644 index a183a17..0000000 --- a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go +++ /dev/null @@ -1,66 +0,0 @@ -package getter - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// BitBucketDetector implements Detector to detect BitBucket URLs and turn -// them into URLs that the Git or Hg Getter can understand. -type BitBucketDetector struct{} - -func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "bitbucket.org/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { - u, err := url.Parse("https://" + src) - if err != nil { - return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) - } - - // We need to get info on this BitBucket repository to determine whether - // it is Git or Hg. - var info struct { - SCM string `json:"scm"` - } - infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path - resp, err := http.Get(infoUrl) - if err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - if resp.StatusCode == 403 { - // A private repo - return "", true, fmt.Errorf( - "shorthand BitBucket URL can't be used for private repos, " + - "please use a full URL") - } - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&info); err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - - switch info.SCM { - case "git": - if !strings.HasSuffix(u.Path, ".git") { - u.Path += ".git" - } - - return "git::" + u.String(), true, nil - case "hg": - return "hg::" + u.String(), true, nil - default: - return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) - } -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go deleted file mode 100644 index 756ea43..0000000 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ /dev/null @@ -1,67 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -// FileDetector implements Detector to detect file paths. -type FileDetector struct{} - -func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if !filepath.IsAbs(src) { - if pwd == "" { - return "", true, fmt.Errorf( - "relative paths require a module with a pwd") - } - - // Stat the pwd to determine if its a symbolic link. If it is, - // then the pwd becomes the original directory. Otherwise, - // `filepath.Join` below does some weird stuff. - // - // We just ignore if the pwd doesn't exist. That error will be - // caught later when we try to use the URL. - if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { - if err != nil { - return "", true, err - } - if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) - if err != nil { - return "", true, err - } - - // The symlink itself might be a relative path, so we have to - // resolve this to have a correctly rooted URL. 
- pwd, err = filepath.Abs(pwd) - if err != nil { - return "", true, err - } - } - } - - src = filepath.Join(pwd, src) - } - - return fmtFileURL(src), true, nil -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - return fmt.Sprintf("file://%s", path) - } - - // Make sure that we don't start with "/" since we add that below. - if path[0] == '/' { - path = path[1:] - } - return fmt.Sprintf("file:///%s", path) -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go deleted file mode 100644 index c084ad9..0000000 --- a/vendor/github.com/hashicorp/go-getter/detect_github.go +++ /dev/null @@ -1,73 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// GitHubDetector implements Detector to detect GitHub URLs and turn -// them into URLs that the Git Getter can understand. -type GitHubDetector struct{} - -func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "github.com/") { - return d.detectHTTP(src) - } else if strings.HasPrefix(src, "git@github.com:") { - return d.detectSSH(src) - } - - return "", false, nil -} - -func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 3 { - return "", false, fmt.Errorf( - "GitHub URLs should be github.com/username/repo") - } - - urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) - } - - if !strings.HasSuffix(url.Path, ".git") { - url.Path += ".git" - } - - if len(parts) > 3 { - url.Path += "//" + strings.Join(parts[3:], "/") - } - - return "git::" + url.String(), true, nil -} - -func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { - idx := strings.Index(src, ":") - qidx := strings.Index(src, "?") - if qidx == -1 { - qidx = len(src) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User("git") - u.Host = "github.com" - u.Path = src[idx+1 : qidx] - if qidx < len(src) { - q, err := url.ParseQuery(src[qidx+1:]) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - - u.RawQuery = q.Encode() - } - - return "git::" + u.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go deleted file mode 100644 index 8e0f4a0..0000000 --- a/vendor/github.com/hashicorp/go-getter/detect_s3.go +++ /dev/null @@ -1,61 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// S3Detector implements Detector to detect S3 URLs and turn -// them into URLs that the S3 getter can understand. 
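A short sketch of calling the `Detect` entry point above directly with the default detector list (the shorthand source and `pwd` are illustrative):

```
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	src, err := getter.Detect(
		"github.com/hashicorp/go-getter", // shorthand source
		"/tmp/work",                      // pwd, used for relative file paths
		getter.Detectors,                 // the default list defined above
	)
	if err != nil {
		log.Fatal(err)
	}
	// Prints "git::https://github.com/hashicorp/go-getter.git"
	fmt.Println(src)
}
```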
-type S3Detector struct{} - -func (d *S3Detector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.Contains(src, ".amazonaws.com/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *S3Detector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 2 { - return "", false, fmt.Errorf( - "URL is not a valid S3 URL") - } - - hostParts := strings.Split(parts[0], ".") - if len(hostParts) == 3 { - return d.detectPathStyle(hostParts[0], parts[1:]) - } else if len(hostParts) == 4 { - return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:]) - } else { - return "", false, fmt.Errorf( - "URL is not a valid S3 URL") - } -} - -func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) { - urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", false, fmt.Errorf("error parsing S3 URL: %s", err) - } - - return "s3::" + url.String(), true, nil -} - -func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) { - urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", false, fmt.Errorf("error parsing S3 URL: %s", err) - } - - return "s3::" + url.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go deleted file mode 100644 index 647ccf4..0000000 --- a/vendor/github.com/hashicorp/go-getter/folder_storage.go +++ /dev/null @@ -1,65 +0,0 @@ -package getter - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "os" - "path/filepath" -) - -// FolderStorage is an implementation of the Storage interface that manages -// modules on the disk. -type FolderStorage struct { - // StorageDir is the directory where the modules will be stored. - StorageDir string -} - -// Dir implements Storage.Dir -func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { - d = s.dir(key) - _, err = os.Stat(d) - if err == nil { - // Directory exists - e = true - return - } - if os.IsNotExist(err) { - // Directory doesn't exist - d = "" - e = false - err = nil - return - } - - // An error - d = "" - e = false - return -} - -// Get implements Storage.Get -func (s *FolderStorage) Get(key string, source string, update bool) error { - dir := s.dir(key) - if !update { - if _, err := os.Stat(dir); err == nil { - // If the directory already exists, then we're done since - // we're not updating. - return nil - } else if !os.IsNotExist(err) { - // If the error we got wasn't a file-not-exist error, then - // something went wrong and we should report it. - return fmt.Errorf("Error reading module directory: %s", err) - } - } - - // Get the source. This always forces an update. - return Get(dir, source) -} - -// dir returns the directory name internally that we'll use to map to -// internally. 
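A sketch of using `FolderStorage` as defined above; the storage directory and key are illustrative:

```
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	storage := &getter.FolderStorage{StorageDir: "/tmp/modules"}

	// Download (or reuse) a source under an opaque key; update=false
	// leaves an existing directory for the key untouched.
	if err := storage.Get("my-key", "github.com/hashicorp/go-getter", false); err != nil {
		log.Fatal(err)
	}

	// Look up where the key was stored on disk.
	if dir, ok, err := storage.Dir("my-key"); err == nil && ok {
		fmt.Println("stored at", dir)
	}
}
```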
-func (s *FolderStorage) dir(key string) string { - sum := md5.Sum([]byte(key)) - return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) -} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go deleted file mode 100644 index c3236f5..0000000 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ /dev/null @@ -1,139 +0,0 @@ -// getter is a package for downloading files or directories from a variety of -// protocols. -// -// getter is unique in its ability to download both directories and files. -// It also detects certain source strings to be protocol-specific URLs. For -// example, "github.com/hashicorp/go-getter" would turn into a Git URL and -// use the Git protocol. -// -// Protocols and detectors are extensible. -// -// To get started, see Client. -package getter - -import ( - "bytes" - "fmt" - "net/url" - "os/exec" - "regexp" - "syscall" -) - -// Getter defines the interface that schemes must implement to download -// things. -type Getter interface { - // Get downloads the given URL into the given directory. This always - // assumes that we're updating and gets the latest version that it can. - // - // The directory may already exist (if we're updating). If it is in a - // format that isn't understood, an error should be returned. Get shouldn't - // simply nuke the directory. - Get(string, *url.URL) error - - // GetFile downloads the give URL into the given path. The URL must - // reference a single file. If possible, the Getter should check if - // the remote end contains the same file and no-op this operation. - GetFile(string, *url.URL) error - - // ClientMode returns the mode based on the given URL. This is used to - // allow clients to let the getters decide which mode to use. - ClientMode(*url.URL) (ClientMode, error) -} - -// Getters is the mapping of scheme to the Getter implementation that will -// be used to get a dependency. -var Getters map[string]Getter - -// forcedRegexp is the regular expression that finds forced getters. This -// syntax is schema::url, example: git::https://foo.com -var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) - -func init() { - httpGetter := &HttpGetter{Netrc: true} - - Getters = map[string]Getter{ - "file": new(FileGetter), - "git": new(GitGetter), - "hg": new(HgGetter), - "s3": new(S3Getter), - "http": httpGetter, - "https": httpGetter, - } -} - -// Get downloads the directory specified by src into the folder specified by -// dst. If dst already exists, Get will attempt to update it. -// -// src is a URL, whereas dst is always just a file path to a folder. This -// folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: true, - Getters: Getters, - }).Get() -} - -// GetAny downloads a URL into the given destination. Unlike Get or -// GetFile, both directories and files are supported. -// -// dst must be a directory. If src is a file, it will be downloaded -// into dst with the basename of the URL. If src is a directory or -// archive, it will be unpacked directly into dst. -func GetAny(dst, src string) error { - return (&Client{ - Src: src, - Dst: dst, - Mode: ClientModeAny, - Getters: Getters, - }).Get() -} - -// GetFile downloads the file specified by src into the path specified by -// dst. 
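For reference, the force syntax matched by `forcedRegexp` above splits as follows. Since `getForcedGetter` is unexported, this fragment only makes sense inside the package:

```
force, src := getForcedGetter("git::https://example.com/repo.git")
// force == "git"
// src   == "https://example.com/repo.git"
```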
-func GetFile(dst, src string) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: false, - Getters: Getters, - }).Get() -} - -// getRunCommand is a helper that will run a command and capture the output -// in the case an error happens. -func getRunCommand(cmd *exec.Cmd) error { - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - err := cmd.Run() - if err == nil { - return nil - } - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return fmt.Errorf( - "%s exited with %d: %s", - cmd.Path, - status.ExitStatus(), - buf.String()) - } - } - - return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) -} - -// getForcedGetter takes a source and returns the tuple of the forced -// getter and the raw URL (without the force syntax). -func getForcedGetter(src string) (string, string) { - var forced string - if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { - forced = ms[1] - src = ms[2] - } - - return forced, src -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go deleted file mode 100644 index e5d2d61..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_file.go +++ /dev/null @@ -1,32 +0,0 @@ -package getter - -import ( - "net/url" - "os" -) - -// FileGetter is a Getter implementation that will download a module from -// a file scheme. -type FileGetter struct { - // Copy, if set to true, will copy data instead of using a symlink - Copy bool -} - -func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - fi, err := os.Stat(path) - if err != nil { - return 0, err - } - - // Check if the source is a directory. - if fi.IsDir() { - return ClientModeDir, nil - } - - return ClientModeFile, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go deleted file mode 100644 index c89a2d5..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build !windows - -package getter - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" -) - -func (g *FileGetter) Get(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. - if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - return os.Symlink(path, dst) -} - -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a file to be usable. 
- if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if fi.IsDir() { - return fmt.Errorf("source path must be a file") - } - - _, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - // If we're not copying, just symlink and we're done - if !g.Copy { - return os.Symlink(path, dst) - } - - // Copy - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, srcF) - return err -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go deleted file mode 100644 index f87ed0a..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build windows - -package getter - -import ( - "fmt" - "io" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" -) - -func (g *FileGetter) Get(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. - if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - sourcePath := toBackslash(path) - - // Use mklink to create a junction point - output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) - } - - return nil -} - -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. 
- if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if fi.IsDir() { - return fmt.Errorf("source path must be a file") - } - - _, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - // If we're not copying, just symlink and we're done - if !g.Copy { - return os.Symlink(path, dst) - } - - // Copy - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, srcF) - return err -} - -// toBackslash returns the result of replacing each slash character -// in path with a backslash ('\') character. Multiple separators are -// replaced by multiple backslashes. -func toBackslash(path string) string { - return strings.Replace(path, "/", "\\", -1) -} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go deleted file mode 100644 index 0728139..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ /dev/null @@ -1,225 +0,0 @@ -package getter - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - urlhelper "github.com/hashicorp/go-getter/helper/url" - "github.com/hashicorp/go-version" -) - -// GitGetter is a Getter implementation that will download a module from -// a git repository. -type GitGetter struct{} - -func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil -} - -func (g *GitGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("git"); err != nil { - return fmt.Errorf("git must be available and on the PATH") - } - - // Extract some query parameters we use - var ref, sshKey string - q := u.Query() - if len(q) > 0 { - ref = q.Get("ref") - q.Del("ref") - - sshKey = q.Get("sshkey") - q.Del("sshkey") - - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() - } - - var sshKeyFile string - if sshKey != "" { - // Check that the git version is sufficiently new. - if err := checkGitVersion("2.3"); err != nil { - return fmt.Errorf("Error using ssh key: %v", err) - } - - // We have an SSH key - decode it. - raw, err := base64.StdEncoding.DecodeString(sshKey) - if err != nil { - return err - } - - // Create a temp file for the key and ensure it is removed. - fh, err := ioutil.TempFile("", "go-getter") - if err != nil { - return err - } - sshKeyFile = fh.Name() - defer os.Remove(sshKeyFile) - - // Set the permissions prior to writing the key material. - if err := os.Chmod(sshKeyFile, 0600); err != nil { - return err - } - - // Write the raw key into the temp file. 
- _, err = fh.Write(raw) - fh.Close() - if err != nil { - return err - } - } - - // Clone or update the repository - _, err := os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - err = g.update(dst, sshKeyFile, ref) - } else { - err = g.clone(dst, sshKeyFile, u) - } - if err != nil { - return err - } - - // Next: check out the proper tag/branch if it is specified, and checkout - if ref != "" { - if err := g.checkout(dst, ref); err != nil { - return err - } - } - - // Lastly, download any/all submodules. - return g.fetchSubmodules(dst, sshKeyFile) -} - -// GetFile for Git doesn't support updating at this time. It will download -// the file every time. -func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-git") - if err != nil { - return err - } - if err := os.RemoveAll(td); err != nil { - return err - } - - // Get the filename, and strip the filename from the URL so we can - // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.Dir(u.Path) - - // Get the full repository - if err := g.Get(td, u); err != nil { - return err - } - - // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) - if err != nil { - return err - } - - fg := &FileGetter{Copy: true} - return fg.GetFile(dst, u) -} - -func (g *GitGetter) checkout(dst string, ref string) error { - cmd := exec.Command("git", "checkout", ref) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { - cmd := exec.Command("git", "clone", u.String(), dst) - setupGitEnv(cmd, sshKeyFile) - return getRunCommand(cmd) -} - -func (g *GitGetter) update(dst, sshKeyFile, ref string) error { - // Determine if we're a branch. If we're NOT a branch, then we just - // switch to master prior to checking out - cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) - cmd.Dir = dst - - if getRunCommand(cmd) != nil { - // Not a branch, switch to master. This will also catch non-existent - // branches, in which case we want to switch to master and then - // checkout the proper branch later. - ref = "master" - } - - // We have to be on a branch to pull - if err := g.checkout(dst, ref); err != nil { - return err - } - - cmd = exec.Command("git", "pull", "--ff-only") - cmd.Dir = dst - setupGitEnv(cmd, sshKeyFile) - return getRunCommand(cmd) -} - -// fetchSubmodules downloads any configured submodules recursively. -func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { - cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") - cmd.Dir = dst - setupGitEnv(cmd, sshKeyFile) - return getRunCommand(cmd) -} - -// setupGitEnv sets up the environment for the given command. This is used to -// pass configuration data to git and ssh and enables advanced cloning methods. -func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { - var sshOpts []string - - if sshKeyFile != "" { - // We have an SSH key temp file configured, tell ssh about this. - sshOpts = append(sshOpts, "-i", sshKeyFile) - } - - cmd.Env = append(os.Environ(), - // Set the ssh command to use for clones. - "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), - ) -} - -// checkGitVersion is used to check the version of git installed on the system -// against a known minimum version. Returns an error if the installed version -// is older than the given minimum. 
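Putting the query handling above together, a hedged sketch of constructing a source string for this getter (the repository URL and key path are illustrative):

```
package main

import (
	"encoding/base64"
	"io/ioutil"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Read and base64-encode a private key (path illustrative).
	pem, err := ioutil.ReadFile("/home/me/.ssh/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	key := base64.StdEncoding.EncodeToString(pem)

	// Force the git getter, pin a ref, and pass the key inline.
	src := "git::ssh://git@example.com/repo.git?ref=v1.2.3&sshkey=" + key
	if err := getter.Get("/tmp/out", src); err != nil {
		log.Fatal(err)
	}
}
```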
-func checkGitVersion(min string) error { - want, err := version.NewVersion(min) - if err != nil { - return err - } - - out, err := exec.Command("git", "version").Output() - if err != nil { - return err - } - - fields := strings.Fields(string(out)) - if len(fields) != 3 { - return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) - } - - have, err := version.NewVersion(fields[2]) - if err != nil { - return err - } - - if have.LessThan(want) { - return fmt.Errorf("Required git version = %s, have %s", want, have) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go deleted file mode 100644 index 820bdd4..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ /dev/null @@ -1,131 +0,0 @@ -package getter - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path/filepath" - "runtime" - - urlhelper "github.com/hashicorp/go-getter/helper/url" -) - -// HgGetter is a Getter implementation that will download a module from -// a Mercurial repository. -type HgGetter struct{} - -func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil -} - -func (g *HgGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("hg"); err != nil { - return fmt.Errorf("hg must be available and on the PATH") - } - - newURL, err := urlhelper.Parse(u.String()) - if err != nil { - return err - } - if fixWindowsDrivePath(newURL) { - // See valid file path form on http://www.selenic.com/hg/help/urls - newURL.Path = fmt.Sprintf("/%s", newURL.Path) - } - - // Extract some query parameters we use - var rev string - q := newURL.Query() - if len(q) > 0 { - rev = q.Get("rev") - q.Del("rev") - - newURL.RawQuery = q.Encode() - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err != nil { - if err := g.clone(dst, newURL); err != nil { - return err - } - } - - if err := g.pull(dst, newURL); err != nil { - return err - } - - return g.update(dst, newURL, rev) -} - -// GetFile for Hg doesn't support updating at this time. It will download -// the file every time. -func (g *HgGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-hg") - if err != nil { - return err - } - if err := os.RemoveAll(td); err != nil { - return err - } - - // Get the filename, and strip the filename from the URL so we can - // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.ToSlash(filepath.Dir(u.Path)) - - // If we're on Windows, we need to set the host to "localhost" for hg - if runtime.GOOS == "windows" { - u.Host = "localhost" - } - - // Get the full repository - if err := g.Get(td, u); err != nil { - return err - } - - // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) - if err != nil { - return err - } - - fg := &FileGetter{Copy: true} - return fg.GetFile(dst, u) -} - -func (g *HgGetter) clone(dst string, u *url.URL) error { - cmd := exec.Command("hg", "clone", "-U", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *HgGetter) pull(dst string, u *url.URL) error { - cmd := exec.Command("hg", "pull") - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *HgGetter) update(dst string, u *url.URL, rev string) error { - args := []string{"update"} - if rev != "" { - args = append(args, rev) - } - - cmd := exec.Command("hg", args...) 
- cmd.Dir = dst - return getRunCommand(cmd) -} - -func fixWindowsDrivePath(u *url.URL) bool { - // hg assumes a file:/// prefix for Windows drive letter file paths. - // (e.g. file:///c:/foo/bar) - // If the URL Path does not begin with a '/' character, the resulting URL - // path will have a file:// prefix. (e.g. file://c:/foo/bar) - // See http://www.selenic.com/hg/help/urls and the examples listed in - // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 - return runtime.GOOS == "windows" && u.Scheme == "file" && - len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' -} diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go deleted file mode 100644 index 661d898..0000000 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ /dev/null @@ -1,227 +0,0 @@ -package getter - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" -) - -// HttpGetter is a Getter implementation that will download from an HTTP -// endpoint. -// -// For file downloads, HTTP is used directly. -// -// The protocol for downloading a directory from an HTTP endpoing is as follows: -// -// An HTTP GET request is made to the URL with the additional GET parameter -// "terraform-get=1". This lets you handle that scenario specially if you -// wish. The response must be a 2xx. -// -// First, a header is looked for "X-Terraform-Get" which should contain -// a source URL to download. -// -// If the header is not present, then a meta tag is searched for named -// "terraform-get" and the content should be a source URL. -// -// The source URL, whether from the header or meta tag, must be a fully -// formed URL. The shorthand syntax of "github.com/foo/bar" or relative -// paths are not allowed. -type HttpGetter struct { - // Netrc, if true, will lookup and use auth information found - // in the user's netrc file if available. - Netrc bool -} - -func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { - if strings.HasSuffix(u.Path, "/") { - return ClientModeDir, nil - } - return ClientModeFile, nil -} - -func (g *HttpGetter) Get(dst string, u *url.URL) error { - // Copy the URL so we can modify it - var newU url.URL = *u - u = &newU - - if g.Netrc { - // Add auth from netrc if we can - if err := addAuthFromNetrc(u); err != nil { - return err - } - } - - // Add terraform-get to the parameter. - q := u.Query() - q.Add("terraform-get", "1") - u.RawQuery = q.Encode() - - // Get the URL - resp, err := http.Get(u.String()) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) - } - - // Extract the source URL - var source string - if v := resp.Header.Get("X-Terraform-Get"); v != "" { - source = v - } else { - source, err = g.parseMeta(resp.Body) - if err != nil { - return err - } - } - if source == "" { - return fmt.Errorf("no source URL was returned") - } - - // If there is a subdir component, then we download the root separately - // into a temporary directory, then copy over the proper subdir. 
-	source, subDir := SourceDirSubdir(source)
-	if subDir == "" {
-		return Get(dst, source)
-	}
-
-	// We have a subdir, time to jump some hoops
-	return g.getSubdir(dst, source, subDir)
-}
-
-func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
-
-	if g.Netrc {
-		// Add auth from netrc if we can
-		if err := addAuthFromNetrc(u); err != nil {
-			return err
-		}
-	}
-
-	resp, err := http.Get(u.String())
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != 200 {
-		return fmt.Errorf("bad response code: %d", resp.StatusCode)
-	}
-
-	// Create all the parent directories
-	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
-		return err
-	}
-
-	f, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	_, err = io.Copy(f, resp.Body)
-	return err
-}
-
-// getSubdir downloads the source into the destination, but with
-// the proper subdir.
-func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
-	// Create a temporary directory to store the full source
-	td, err := ioutil.TempDir("", "tf")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(td)
-
-	// Download that into the given directory
-	if err := Get(td, source); err != nil {
-		return err
-	}
-
-	// Make sure the subdir path actually exists
-	sourcePath := filepath.Join(td, subDir)
-	if _, err := os.Stat(sourcePath); err != nil {
-		return fmt.Errorf(
-			"Error downloading %s: %s", source, err)
-	}
-
-	// Copy the subdirectory into our actual destination.
-	if err := os.RemoveAll(dst); err != nil {
-		return err
-	}
-
-	// Make the final destination
-	if err := os.MkdirAll(dst, 0755); err != nil {
-		return err
-	}
-
-	return copyDir(dst, sourcePath, false)
-}
-
-// parseMeta looks for the first meta tag in the given reader that
-// will give us the source URL.
-func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
-	d := xml.NewDecoder(r)
-	d.CharsetReader = charsetReader
-	d.Strict = false
-	var err error
-	var t xml.Token
-	for {
-		t, err = d.Token()
-		if err != nil {
-			if err == io.EOF {
-				err = nil
-			}
-			return "", err
-		}
-		if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
-			return "", nil
-		}
-		if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
-			return "", nil
-		}
-		e, ok := t.(xml.StartElement)
-		if !ok || !strings.EqualFold(e.Name.Local, "meta") {
-			continue
-		}
-		if attrValue(e.Attr, "name") != "terraform-get" {
-			continue
-		}
-		if f := attrValue(e.Attr, "content"); f != "" {
-			return f, nil
-		}
-	}
-}
-
-// attrValue returns the attribute value for the case-insensitive key
-// `name`, or the empty string if nothing is found.
-func attrValue(attrs []xml.Attr, name string) string {
-	for _, a := range attrs {
-		if strings.EqualFold(a.Name.Local, name) {
-			return a.Value
-		}
-	}
-	return ""
-}
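The header/meta-tag discovery that `Get` and `parseMeta` implement above is easiest to see from the server side. Below is a minimal sketch of a server speaking that protocol; the route, port, and archive URL are made-up values for illustration:

```go
package main

import "net/http"

func main() {
	http.HandleFunc("/modules/network", func(w http.ResponseWriter, r *http.Request) {
		// go-getter appends terraform-get=1 when it wants a pointer to the
		// real source rather than a human-facing page.
		if r.URL.Query().Get("terraform-get") == "1" {
			// The header wins over the meta tag, per HttpGetter.Get above.
			w.Header().Set("X-Terraform-Get", "https://example.com/archives/network.tar.gz")
			return
		}
		// Clients without the parameter get the equivalent meta tag instead.
		w.Write([]byte(`<html><head><meta name="terraform-get"
			content="https://example.com/archives/network.tar.gz"></head></html>`))
	})
	http.ListenAndServe(":8080", nil)
}
```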
-
-// charsetReader returns a reader for the given charset. Currently
-// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
-// error which is printed by go get, so the user can find why the package
-// wasn't downloaded if the encoding is not supported. Note that, in
-// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
-// greater than 0x7f are not rejected).
-func charsetReader(charset string, input io.Reader) (io.Reader, error) {
-	switch strings.ToLower(charset) {
-	case "ascii":
-		return input, nil
-	default:
-		return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
deleted file mode 100644
index 882e694..0000000
--- a/vendor/github.com/hashicorp/go-getter/get_mock.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package getter
-
-import (
-	"net/url"
-)
-
-// MockGetter is an implementation of Getter that can be used for tests.
-type MockGetter struct {
-	// Proxy, if set, will be called after recording the calls below.
-	// If it isn't set, then the *Err values will be returned.
-	Proxy Getter
-
-	GetCalled bool
-	GetDst    string
-	GetURL    *url.URL
-	GetErr    error
-
-	GetFileCalled bool
-	GetFileDst    string
-	GetFileURL    *url.URL
-	GetFileErr    error
-}
-
-func (g *MockGetter) Get(dst string, u *url.URL) error {
-	g.GetCalled = true
-	g.GetDst = dst
-	g.GetURL = u
-
-	if g.Proxy != nil {
-		return g.Proxy.Get(dst, u)
-	}
-
-	return g.GetErr
-}
-
-func (g *MockGetter) GetFile(dst string, u *url.URL) error {
-	g.GetFileCalled = true
-	g.GetFileDst = dst
-	g.GetFileURL = u
-
-	if g.Proxy != nil {
-		return g.Proxy.GetFile(dst, u)
-	}
-	return g.GetFileErr
-}
-
-func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
-	if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
-		return ClientModeDir, nil
-	}
-	return ClientModeFile, nil
-}
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
deleted file mode 100644
index d3bffeb..0000000
--- a/vendor/github.com/hashicorp/go-getter/get_s3.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package getter
-
-import (
-	"fmt"
-	"io"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
-	"github.com/aws/aws-sdk-go/aws/ec2metadata"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/s3"
-)
-
-// S3Getter is a Getter implementation that will download a module from
-// an S3 bucket.
-type S3Getter struct{}
-
-func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
-	// Parse URL
-	region, bucket, path, _, creds, err := g.parseUrl(u)
-	if err != nil {
-		return 0, err
-	}
-
-	// Create client config
-	config := g.getAWSConfig(region, creds)
-	sess := session.New(config)
-	client := s3.New(sess)
-
-	// List the object(s) at the given prefix
-	req := &s3.ListObjectsInput{
-		Bucket: aws.String(bucket),
-		Prefix: aws.String(path),
-	}
-	resp, err := client.ListObjects(req)
-	if err != nil {
-		return 0, err
-	}
-
-	for _, o := range resp.Contents {
-		// Use file mode on exact match.
-		if *o.Key == path {
-			return ClientModeFile, nil
-		}
-
-		// Use dir mode if child keys are found.
-		if strings.HasPrefix(*o.Key, path+"/") {
-			return ClientModeDir, nil
-		}
-	}
-
-	// There was no match, so just return file mode. The download is going
-	// to fail but we will let S3 return the proper error later.
-	return ClientModeFile, nil
-}
-
-func (g *S3Getter) Get(dst string, u *url.URL) error {
-	// Parse URL
-	region, bucket, path, _, creds, err := g.parseUrl(u)
-	if err != nil {
-		return err
-	}
-
-	// Remove destination if it already exists
-	_, err = os.Stat(dst)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	if err == nil {
-		// Remove the destination
-		if err := os.RemoveAll(dst); err != nil {
-			return err
-		}
-	}
-
-	// Create all the parent directories
-	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
-		return err
-	}
-
-	config := g.getAWSConfig(region, creds)
-	sess := session.New(config)
-	client := s3.New(sess)
-
-	// List files in path, keep listing until no more objects are found
-	lastMarker := ""
-	hasMore := true
-	for hasMore {
-		req := &s3.ListObjectsInput{
-			Bucket: aws.String(bucket),
-			Prefix: aws.String(path),
-		}
-		if lastMarker != "" {
-			req.Marker = aws.String(lastMarker)
-		}
-
-		resp, err := client.ListObjects(req)
-		if err != nil {
-			return err
-		}
-
-		hasMore = aws.BoolValue(resp.IsTruncated)
-
-		// Get each object storing each file relative to the destination path
-		for _, object := range resp.Contents {
-			lastMarker = aws.StringValue(object.Key)
-			objPath := aws.StringValue(object.Key)
-
-			// If the key ends with a forward slash, assume it is a directory and ignore it
-			if strings.HasSuffix(objPath, "/") {
-				continue
-			}
-
-			// Get the object destination path
-			objDst, err := filepath.Rel(path, objPath)
-			if err != nil {
-				return err
-			}
-			objDst = filepath.Join(dst, objDst)
-
-			if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (g *S3Getter) GetFile(dst string, u *url.URL) error {
-	region, bucket, path, version, creds, err := g.parseUrl(u)
-	if err != nil {
-		return err
-	}
-
-	config := g.getAWSConfig(region, creds)
-	sess := session.New(config)
-	client := s3.New(sess)
-	return g.getObject(client, dst, bucket, path, version)
-}
-
-func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
-	req := &s3.GetObjectInput{
-		Bucket: aws.String(bucket),
-		Key:    aws.String(key),
-	}
-	if version != "" {
-		req.VersionId = aws.String(version)
-	}
-
-	resp, err := client.GetObject(req)
-	if err != nil {
-		return err
-	}
-
-	// Create all the parent directories
-	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
-		return err
-	}
-
-	f, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	_, err = io.Copy(f, resp.Body)
-	return err
-}
-
-func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
-	conf := &aws.Config{}
-	if creds == nil {
-		// Grab the metadata URL
-		metadataURL := os.Getenv("AWS_METADATA_URL")
-		if metadataURL == "" {
-			metadataURL = "http://169.254.169.254:80/latest"
-		}
-
-		creds = credentials.NewChainCredentials(
-			[]credentials.Provider{
-				&credentials.EnvProvider{},
-				&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-				&ec2rolecreds.EC2RoleProvider{
-					Client: ec2metadata.New(session.New(&aws.Config{
-						Endpoint: aws.String(metadataURL),
-					})),
-				},
-			})
-	}
-
-	conf.Credentials = creds
-	if region != "" {
-		conf.Region = aws.String(region)
-	}
-
-	return conf
-}
-
-func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
-	// Expected host style: s3.amazonaws.com. They always have 3 parts,
-	// although the first may differ if we're accessing a specific region.
- hostParts := strings.Split(u.Host, ".") - if len(hostParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") - if region == "" { - region = "us-east-1" - } - - pathParts := strings.SplitN(u.Path, "/", 3) - if len(pathParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - - bucket = pathParts[1] - path = pathParts[2] - version = u.Query().Get("version") - - _, hasAwsId := u.Query()["aws_access_key_id"] - _, hasAwsSecret := u.Query()["aws_access_key_secret"] - _, hasAwsToken := u.Query()["aws_access_token"] - if hasAwsId || hasAwsSecret || hasAwsToken { - creds = credentials.NewStaticCredentials( - u.Query().Get("aws_access_key_id"), - u.Query().Get("aws_access_key_secret"), - u.Query().Get("aws_access_token"), - ) - } - - return -} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go deleted file mode 100644 index 02497c2..0000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url.go +++ /dev/null @@ -1,14 +0,0 @@ -package url - -import ( - "net/url" -) - -// Parse parses rawURL into a URL structure. -// The rawURL may be relative or absolute. -// -// Parse is a wrapper for the Go stdlib net/url Parse function, but returns -// Windows "safe" URLs on Windows platforms. -func Parse(rawURL string) (*url.URL, error) { - return parse(rawURL) -} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go deleted file mode 100644 index ed1352a..0000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package url - -import ( - "net/url" -) - -func parse(rawURL string) (*url.URL, error) { - return url.Parse(rawURL) -} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go deleted file mode 100644 index 4655226..0000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package url - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" -) - -func parse(rawURL string) (*url.URL, error) { - // Make sure we're using "/" since URLs are "/"-based. - rawURL = filepath.ToSlash(rawURL) - - u, err := url.Parse(rawURL) - if err != nil { - return nil, err - } - - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a drive letter file path where the drive - // letter has been parsed into the URL Scheme, and the rest of the path - // has been parsed into the URL Path without the leading ':' character. - u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path) - u.Scheme = "" - } - - if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { - // Assume we're dealing with a drive letter file path where the drive - // letter has been parsed into the URL Host. - u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) - u.Host = "" - } - - // Remove leading slash for absolute file paths. 
-	if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
-		u.Path = u.Path[1:]
-	}
-
-	return u, err
-}
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
deleted file mode 100644
index c7f6a3f..0000000
--- a/vendor/github.com/hashicorp/go-getter/netrc.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package getter
-
-import (
-	"fmt"
-	"net/url"
-	"os"
-	"runtime"
-
-	"github.com/bgentry/go-netrc/netrc"
-	"github.com/mitchellh/go-homedir"
-)
-
-// addAuthFromNetrc adds auth information to the URL from the user's
-// netrc file if it can be found. This will only add the auth info
-// if the URL doesn't already have auth info specified and the
-// username is blank.
-func addAuthFromNetrc(u *url.URL) error {
-	// If the URL already has auth information, do nothing
-	if u.User != nil && u.User.Username() != "" {
-		return nil
-	}
-
-	// Get the netrc file path
-	path := os.Getenv("NETRC")
-	if path == "" {
-		filename := ".netrc"
-		if runtime.GOOS == "windows" {
-			filename = "_netrc"
-		}
-
-		var err error
-		path, err = homedir.Expand("~/" + filename)
-		if err != nil {
-			return err
-		}
-	}
-
-	// If the path is not a regular file, then do nothing
-	if fi, err := os.Stat(path); err != nil {
-		// File doesn't exist, do nothing
-		if os.IsNotExist(err) {
-			return nil
-		}
-
-		// Some other error!
-		return err
-	} else if fi.IsDir() {
-		// File is a directory, ignore
-		return nil
-	}
-
-	// Load up the netrc file
-	net, err := netrc.ParseFile(path)
-	if err != nil {
-		return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
-	}
-
-	machine := net.FindMachine(u.Host)
-	if machine == nil {
-		// Machine not found, no problem
-		return nil
-	}
-
-	// Set the user info
-	u.User = url.UserPassword(machine.Login, machine.Password)
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
deleted file mode 100644
index 4d5ee3c..0000000
--- a/vendor/github.com/hashicorp/go-getter/source.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package getter
-
-import (
-	"strings"
-)
-
-// SourceDirSubdir takes a source and returns a tuple of the source
-// without the subdir and the subdir itself.
-func SourceDirSubdir(src string) (string, string) {
-	// Calculate an offset to avoid accidentally marking the scheme
-	// as the dir.
-	var offset int
-	if idx := strings.Index(src, "://"); idx > -1 {
-		offset = idx + 3
-	}
-
-	// First see if we even have an explicit subdir
-	idx := strings.Index(src[offset:], "//")
-	if idx == -1 {
-		return src, ""
-	}
-
-	idx += offset
-	subdir := src[idx+2:]
-	src = src[:idx]
-
-	// Next, check if we have query parameters and push them onto the
-	// URL.
-	if idx = strings.Index(subdir, "?"); idx > -1 {
-		query := subdir[idx:]
-		subdir = subdir[:idx]
-		src += query
-	}
-
-	return src, subdir
-}
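`SourceDirSubdir` is easiest to follow with a concrete input: the `//` delimiter is stripped out and any query string is pushed back onto the root source. A small sketch (the repository address is made up):

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The "//" after the repository marks the subdirectory; "?ref=v1.2.0"
	// is moved back onto the root source, per SourceDirSubdir above.
	src, subDir := getter.SourceDirSubdir("https://example.com/repo.git//modules/vpc?ref=v1.2.0")
	fmt.Println(src)    // https://example.com/repo.git?ref=v1.2.0
	fmt.Println(subDir) // modules/vpc
}
```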
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
deleted file mode 100644
index 2bc6b9e..0000000
--- a/vendor/github.com/hashicorp/go-getter/storage.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package getter
-
-// Storage is an interface that knows how to look up downloaded directories
-// as well as download and update directories from their sources into the
-// proper location.
-type Storage interface {
-	// Dir returns the directory on local disk where the directory source
-	// can be loaded from.
-	Dir(string) (string, bool, error)
-
-	// Get will download and optionally update the given directory.
-	Get(string, string, bool) error
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
deleted file mode 100644
index abaf1e4..0000000
--- a/vendor/github.com/hashicorp/go-hclog/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2017 HashiCorp
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
deleted file mode 100644
index 614342b..0000000
--- a/vendor/github.com/hashicorp/go-hclog/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# go-hclog
-
-[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
-
-[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
-
-`go-hclog` is a package for Go that provides a simple key/value logging
-interface for use in development and production environments.
-
-Unlike the standard library `log` package, it provides logging levels,
-so output can be limited to the desired verbosity.
-
-It does not provide `Printf` style logging, only key/value logging that is
-exposed as arguments to the logging functions for simplicity.
-
-It provides a human-readable output mode for use in development as well as
-a JSON output mode for production.
-
-## Stability Note
-
-While this library is fully open source and HashiCorp will be maintaining it
-(since we are and will be making extensive use of it), the API and output
-format is subject to minor changes as we fully bake and vet it in our projects.
-This notice will be removed once it's fully integrated into our major projects
-and no further changes are anticipated.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/go-hclog`.
-
-Full documentation is available at
-http://godoc.org/github.com/hashicorp/go-hclog
-
-## Usage
-
-### Use the global logger
-
-```go
-hclog.Default().Info("hello world")
-```
-
-```text
-2017-07-05T16:15:55.167-0700 [INFO ] hello world
-```
-
-(Note timestamps are removed in future examples for brevity.)
-
-### Create a new logger
-
-```go
-appLogger := hclog.New(&hclog.LoggerOptions{
-	Name:  "my-app",
-	Level: hclog.LevelFromString("DEBUG"),
-})
-```
-
-### Emit an Info level message with 2 key/value pairs
-
-```go
-input := "5.5"
-_, err := strconv.ParseInt(input, 10, 32)
-if err != nil {
-	appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
-}
-```
-
-```text
-... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
-```
-
-### Create a new Logger for a major subsystem
-
-```go
-subsystemLogger := appLogger.Named("transport")
-subsystemLogger.Info("we are transporting something")
-```
-
-```text
-... [INFO ] my-app.transport: we are transporting something
-```
-
-Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
-reflecting both the application and subsystem names.
-
-### Create a new Logger with fixed key/value pairs
-
-Using `With()` will include a specific key-value pair in all messages emitted
-by that logger.
-
-```go
-requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
-requestLogger := subsystemLogger.With("request", requestID)
-requestLogger.Info("we are transporting a request")
-```
-
-```text
-... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
-```
-
-This allows sub-loggers to be context-specific without having to thread the
-context through all the callers.
-
-### Use this with code that uses the standard library logger
-
-If you want to use the standard library's `log.Logger` interface you can wrap
-`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
-it with the familiar `Println()`, `Printf()`, etc. For example:
-
-```go
-stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
-	InferLevels: true,
-})
-// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
-stdLogger.Printf("[DEBUG] %+v", stdLogger)
-```
-
-```text
-... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
-```
-
-Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
-specify `InferLevels: true`, you will not see any output here. You must change
-`appLogger` to `DEBUG` to see output. See the docs for more information.
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
deleted file mode 100644
index 55ce439..0000000
--- a/vendor/github.com/hashicorp/go-hclog/global.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package hclog
-
-import (
-	"sync"
-)
-
-var (
-	protect sync.Once
-	def     Logger
-
-	// The options used to create the Default logger. These are
-	// read only when the Default logger is created, so set them
-	// as soon as the process starts.
-	DefaultOptions = &LoggerOptions{
-		Level:  DefaultLevel,
-		Output: DefaultOutput,
-	}
-)
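Because `DefaultOptions` is read exactly once, through the `sync.Once` guard below, a process that wants a different global configuration has to mutate it before the first `Default()` or `L()` call; a minimal sketch:

```go
package main

import hclog "github.com/hashicorp/go-hclog"

func main() {
	// Must run before anything calls Default() or L(): the options are
	// captured once, when the global logger is first created.
	hclog.DefaultOptions.Level = hclog.Debug

	hclog.L().Debug("global logger now emits debug", "pid", 42)
}
```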
-
-// Return a logger that is held globally. This can be a good starting
-// place, and then you can use .With() and .Named() to create sub-loggers
-// to be used in more specific contexts.
-func Default() Logger {
-	protect.Do(func() {
-		def = New(DefaultOptions)
-	})
-
-	return def
-}
-
-// A short alias for Default()
-func L() Logger {
-	return Default()
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go
deleted file mode 100644
index 9f90c28..0000000
--- a/vendor/github.com/hashicorp/go-hclog/int.go
+++ /dev/null
@@ -1,385 +0,0 @@
-package hclog
-
-import (
-	"bufio"
-	"encoding/json"
-	"fmt"
-	"log"
-	"os"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-var (
-	_levelToBracket = map[Level]string{
-		Debug: "[DEBUG]",
-		Trace: "[TRACE]",
-		Info:  "[INFO ]",
-		Warn:  "[WARN ]",
-		Error: "[ERROR]",
-	}
-)
-
-// Given the options (nil for defaults), create a new Logger
-func New(opts *LoggerOptions) Logger {
-	if opts == nil {
-		opts = &LoggerOptions{}
-	}
-
-	output := opts.Output
-	if output == nil {
-		output = os.Stderr
-	}
-
-	level := opts.Level
-	if level == NoLevel {
-		level = DefaultLevel
-	}
-
-	return &intLogger{
-		m:      new(sync.Mutex),
-		json:   opts.JSONFormat,
-		caller: opts.IncludeLocation,
-		name:   opts.Name,
-		w:      bufio.NewWriter(output),
-		level:  level,
-	}
-}
-
-// The internal logger implementation. Internal in that it is defined entirely
-// by this package.
-type intLogger struct {
-	json   bool
-	caller bool
-	name   string
-
-	// this is a pointer so that it's shared by any derived loggers, since
-	// those derived loggers share the bufio.Writer as well.
-	m     *sync.Mutex
-	w     *bufio.Writer
-	level Level
-
-	implied []interface{}
-}
-
-// Make sure that intLogger is a Logger
-var _ Logger = &intLogger{}
-
-// The time format to use for logging. This is a version of RFC3339 that
-// contains millisecond precision
-const TimeFormat = "2006-01-02T15:04:05.000Z0700"
-
-// Log a message and a set of key/value pairs if the given level is at
-// or more severe than the threshold configured in the Logger.
-func (z *intLogger) Log(level Level, msg string, args ...interface{}) {
-	if level < z.level {
-		return
-	}
-
-	t := time.Now()
-
-	z.m.Lock()
-	defer z.m.Unlock()
-
-	if z.json {
-		z.logJson(t, level, msg, args...)
-	} else {
-		z.log(t, level, msg, args...)
-	}
-
-	z.w.Flush()
-}
-
-// Clean up a path by returning the last 2 segments of the path only.
-func trimCallerPath(path string) string {
-	// lovely borrowed from zap
-	// nb. To make sure we trim the path correctly on Windows too, we
-	// counter-intuitively need to use '/' and *not* os.PathSeparator here,
-	// because the path given originates from Go stdlib, specifically
-	// runtime.Caller() which (as of Mar/17) returns forward slashes even on
-	// Windows.
-	//
-	// See https://github.com/golang/go/issues/3335
-	// and https://github.com/golang/go/issues/18151
-	//
-	// for discussion on the issue on Go side.
-	//
-
-	// Find the last separator.
-	//
-	idx := strings.LastIndexByte(path, '/')
-	if idx == -1 {
-		return path
-	}
-
-	// Find the penultimate separator.
- idx = strings.LastIndexByte(path[:idx], '/') - if idx == -1 { - return path - } - - return path[idx+1:] -} - -// Non-JSON logging format function -func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - z.w.WriteString(t.Format(TimeFormat)) - z.w.WriteByte(' ') - - s, ok := _levelToBracket[level] - if ok { - z.w.WriteString(s) - } else { - z.w.WriteString("[UNKN ]") - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - z.w.WriteByte(' ') - z.w.WriteString(trimCallerPath(file)) - z.w.WriteByte(':') - z.w.WriteString(strconv.Itoa(line)) - z.w.WriteByte(':') - } - } - - z.w.WriteByte(' ') - - if z.name != "" { - z.w.WriteString(z.name) - z.w.WriteString(": ") - } - - z.w.WriteString(msg) - - args = append(z.implied, args...) - - var stacktrace CapturedStacktrace - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - stacktrace = cs - } else { - args = append(args, "") - } - } - - z.w.WriteByte(':') - - FOR: - for i := 0; i < len(args); i = i + 2 { - var val string - - switch st := args[i+1].(type) { - case string: - val = st - case int: - val = strconv.FormatInt(int64(st), 10) - case int64: - val = strconv.FormatInt(int64(st), 10) - case int32: - val = strconv.FormatInt(int64(st), 10) - case int16: - val = strconv.FormatInt(int64(st), 10) - case int8: - val = strconv.FormatInt(int64(st), 10) - case uint: - val = strconv.FormatUint(uint64(st), 10) - case uint64: - val = strconv.FormatUint(uint64(st), 10) - case uint32: - val = strconv.FormatUint(uint64(st), 10) - case uint16: - val = strconv.FormatUint(uint64(st), 10) - case uint8: - val = strconv.FormatUint(uint64(st), 10) - case CapturedStacktrace: - stacktrace = st - continue FOR - default: - val = fmt.Sprintf("%v", st) - } - - z.w.WriteByte(' ') - z.w.WriteString(args[i].(string)) - z.w.WriteByte('=') - - if strings.ContainsAny(val, " \t\n\r") { - z.w.WriteByte('"') - z.w.WriteString(val) - z.w.WriteByte('"') - } else { - z.w.WriteString(val) - } - } - } - - z.w.WriteString("\n") - - if stacktrace != "" { - z.w.WriteString(string(stacktrace)) - } -} - -// JSON logging function -func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { - vals := map[string]interface{}{ - "@message": msg, - "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), - } - - var levelStr string - switch level { - case Error: - levelStr = "error" - case Warn: - levelStr = "warn" - case Info: - levelStr = "info" - case Debug: - levelStr = "debug" - case Trace: - levelStr = "trace" - default: - levelStr = "all" - } - - vals["@level"] = levelStr - - if z.name != "" { - vals["@module"] = z.name - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - vals["@caller"] = fmt.Sprintf("%s:%d", file, line) - } - } - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - vals["stacktrace"] = cs - } else { - args = append(args, "") - } - } - - for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... 
-				continue
-			}
-			vals[args[i].(string)] = args[i+1]
-		}
-	}
-
-	err := json.NewEncoder(z.w).Encode(vals)
-	if err != nil {
-		panic(err)
-	}
-}
-
-// Emit the message and args at DEBUG level
-func (z *intLogger) Debug(msg string, args ...interface{}) {
-	z.Log(Debug, msg, args...)
-}
-
-// Emit the message and args at TRACE level
-func (z *intLogger) Trace(msg string, args ...interface{}) {
-	z.Log(Trace, msg, args...)
-}
-
-// Emit the message and args at INFO level
-func (z *intLogger) Info(msg string, args ...interface{}) {
-	z.Log(Info, msg, args...)
-}
-
-// Emit the message and args at WARN level
-func (z *intLogger) Warn(msg string, args ...interface{}) {
-	z.Log(Warn, msg, args...)
-}
-
-// Emit the message and args at ERROR level
-func (z *intLogger) Error(msg string, args ...interface{}) {
-	z.Log(Error, msg, args...)
-}
-
-// Indicate that the logger would emit TRACE level logs
-func (z *intLogger) IsTrace() bool {
-	return z.level == Trace
-}
-
-// Indicate that the logger would emit DEBUG level logs
-func (z *intLogger) IsDebug() bool {
-	return z.level <= Debug
-}
-
-// Indicate that the logger would emit INFO level logs
-func (z *intLogger) IsInfo() bool {
-	return z.level <= Info
-}
-
-// Indicate that the logger would emit WARN level logs
-func (z *intLogger) IsWarn() bool {
-	return z.level <= Warn
-}
-
-// Indicate that the logger would emit ERROR level logs
-func (z *intLogger) IsError() bool {
-	return z.level <= Error
-}
-
-// Return a sub-Logger for which every emitted log message will contain
-// the given key/value pairs. This is used to create a context specific
-// Logger.
-func (z *intLogger) With(args ...interface{}) Logger {
-	var nz intLogger = *z
-
-	nz.implied = append(nz.implied, args...)
-
-	return &nz
-}
-
-// Create a new sub-Logger whose name descends from the current name.
-// This is used to create a subsystem specific Logger.
-func (z *intLogger) Named(name string) Logger {
-	var nz intLogger = *z
-
-	if nz.name != "" {
-		nz.name = nz.name + "." + name
-	}
-
-	return &nz
-}
-
-// Create a new sub-Logger with an explicit name. This ignores the current
-// name. This is used to create a standalone logger that doesn't fall
-// within the normal hierarchy.
-func (z *intLogger) ResetNamed(name string) Logger {
-	var nz intLogger = *z
-
-	nz.name = name
-
-	return &nz
-}
-
-// Create a *log.Logger that will send its data through this Logger. This
-// allows packages that expect to be using the standard library log to actually
-// use this logger.
-func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
-	if opts == nil {
-		opts = &StandardLoggerOptions{}
-	}
-
-	return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0)
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go
deleted file mode 100644
index 6bb16ba..0000000
--- a/vendor/github.com/hashicorp/go-hclog/log.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package hclog
-
-import (
-	"io"
-	"log"
-	"os"
-	"strings"
-)
-
-var (
-	DefaultOutput = os.Stderr
-	DefaultLevel  = Info
-)
-
-type Level int
-
-const (
-	// This is a special level used to indicate that no level has been
-	// set and allow for a default to be used.
-	NoLevel Level = 0
-
-	// The most verbose level. Intended to be used for the tracing of actions
-	// in code, such as function enters/exits, etc.
-	Trace Level = 1
-
-	// For programmer low-level analysis.
-	Debug Level = 2
-
-	// For information about steady state operations.
-	Info Level = 3
-
-	// For information about rare but handled events.
-	Warn Level = 4
-
-	// For information about unrecoverable events.
-	Error Level = 5
-)
-
-// LevelFromString returns a Level type for the named log level, or "NoLevel" if
-// the level string is invalid. This facilitates setting the log level via
-// config or environment variable by name in a predictable way.
-func LevelFromString(levelStr string) Level {
-	// We don't care about case. Accept "INFO" or "info"
-	levelStr = strings.ToLower(strings.TrimSpace(levelStr))
-	switch levelStr {
-	case "trace":
-		return Trace
-	case "debug":
-		return Debug
-	case "info":
-		return Info
-	case "warn":
-		return Warn
-	case "error":
-		return Error
-	default:
-		return NoLevel
-	}
-}
-
-// The main Logger interface. All code should be written against this
-// interface only.
-type Logger interface {
-	// Args are alternating key, val pairs.
-	// Keys must be strings.
-	// Vals can be any type, but display is implementation specific.
-	// Emit a message and key/value pairs at the TRACE level
-	Trace(msg string, args ...interface{})
-
-	// Emit a message and key/value pairs at the DEBUG level
-	Debug(msg string, args ...interface{})
-
-	// Emit a message and key/value pairs at the INFO level
-	Info(msg string, args ...interface{})
-
-	// Emit a message and key/value pairs at the WARN level
-	Warn(msg string, args ...interface{})
-
-	// Emit a message and key/value pairs at the ERROR level
-	Error(msg string, args ...interface{})
-
-	// Indicate if TRACE logs would be emitted. This and the other Is* guards
-	// are used to elide expensive logging code based on the current level.
-	IsTrace() bool
-
-	// Indicate if DEBUG logs would be emitted.
-	IsDebug() bool
-
-	// Indicate if INFO logs would be emitted.
-	IsInfo() bool
-
-	// Indicate if WARN logs would be emitted.
-	IsWarn() bool
-
-	// Indicate if ERROR logs would be emitted.
-	IsError() bool
-
-	// Creates a sublogger that will always have the given key/value pairs
-	With(args ...interface{}) Logger
-
-	// Create a logger that will prepend the name string on the front of all messages.
-	// If the logger already has a name, the new value will be appended to the current
-	// name. That way, a major subsystem can use this to decorate all its own logs
-	// without losing context.
-	Named(name string) Logger
-
-	// Create a logger that will prepend the name string on the front of all messages.
-	// This sets the name of the logger to the value directly, unlike Named, which honors
-	// the current name as well.
-	ResetNamed(name string) Logger
-
-	// Return a value that conforms to the stdlib log.Logger interface
-	StandardLogger(opts *StandardLoggerOptions) *log.Logger
-}
-
-type StandardLoggerOptions struct {
-	// Indicate that some minimal parsing should be done on strings to try
-	// and detect their level and re-emit them accordingly.
-	// This supports the prefixes [ERROR], [ERR], [TRACE], [WARN], [INFO], and
-	// [DEBUG], stripping them off before the message is re-emitted.
-	InferLevels bool
-}
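The README examples earlier show the text format; the JSON mode selected through the `LoggerOptions` below instead pairs each message with `@`-prefixed metadata keys, per `logJson` above. A minimal sketch of turning it on:

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "my-app",
		Level:      hclog.Info,
		JSONFormat: true, // route through logJson instead of the text format
		Output:     os.Stdout,
	})

	logger.Info("request served", "path", "/health", "status", 200)
	// {"@level":"info","@message":"request served","@module":"my-app",
	//  "@timestamp":"...","path":"/health","status":200}
}
```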
-
-type LoggerOptions struct {
-	// Name of the subsystem to prefix logs with
-	Name string
-
-	// The threshold for the logger. Anything less severe is suppressed
-	Level Level
-
-	// Where to write the logs to. Defaults to os.Stderr if nil
-	Output io.Writer
-
-	// Control if the output should be in JSON.
-	JSONFormat bool
-
-	// Include file and line information in each log line
-	IncludeLocation bool
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
deleted file mode 100644
index 8af1a3b..0000000
--- a/vendor/github.com/hashicorp/go-hclog/stacktrace.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package hclog
-
-import (
-	"bytes"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-)
-
-var (
-	_stacktraceIgnorePrefixes = []string{
-		"runtime.goexit",
-		"runtime.main",
-	}
-	_stacktracePool = sync.Pool{
-		New: func() interface{} {
-			return newProgramCounters(64)
-		},
-	}
-)
-
-// A stacktrace gathered by a previous call to log.Stacktrace. If passed
-// to a logging function, the stacktrace will be appended.
-type CapturedStacktrace string
-
-// Gather a stacktrace of the current goroutine and return it to be passed
-// to a logging function.
-func Stacktrace() CapturedStacktrace {
-	return CapturedStacktrace(takeStacktrace())
-}
-
-func takeStacktrace() string {
-	programCounters := _stacktracePool.Get().(*programCounters)
-	defer _stacktracePool.Put(programCounters)
-
-	var buffer bytes.Buffer
-
-	for {
-		// Skip the call to runtime.Counters and takeStacktrace so that the
-		// program counters start at the caller of takeStacktrace.
-		n := runtime.Callers(2, programCounters.pcs)
-		if n < cap(programCounters.pcs) {
-			programCounters.pcs = programCounters.pcs[:n]
-			break
-		}
-		// Don't put the too-short counter slice back into the pool; this lets
-		// the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2) - } - - i := 0 - frames := runtime.CallersFrames(programCounters.pcs) - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if shouldIgnoreStacktraceFunction(frame.Function) { - continue - } - if i != 0 { - buffer.WriteByte('\n') - } - i++ - buffer.WriteString(frame.Function) - buffer.WriteByte('\n') - buffer.WriteByte('\t') - buffer.WriteString(frame.File) - buffer.WriteByte(':') - buffer.WriteString(strconv.Itoa(int(frame.Line))) - } - - return buffer.String() -} - -func shouldIgnoreStacktraceFunction(function string) bool { - for _, prefix := range _stacktraceIgnorePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - return false -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go deleted file mode 100644 index 2bb927f..0000000 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ /dev/null @@ -1,62 +0,0 @@ -package hclog - -import ( - "bytes" - "strings" -) - -// Provides a io.Writer to shim the data out of *log.Logger -// and back into our Logger. This is basically the only way to -// build upon *log.Logger. -type stdlogAdapter struct { - hl Logger - inferLevels bool -} - -// Take the data, infer the levels if configured, and send it through -// a regular Logger -func (s *stdlogAdapter) Write(data []byte) (int, error) { - str := string(bytes.TrimRight(data, " \t\n")) - - if s.inferLevels { - level, str := s.pickLevel(str) - switch level { - case Trace: - s.hl.Trace(str) - case Debug: - s.hl.Debug(str) - case Info: - s.hl.Info(str) - case Warn: - s.hl.Warn(str) - case Error: - s.hl.Error(str) - default: - s.hl.Info(str) - } - } else { - s.hl.Info(str) - } - - return len(data), nil -} - -// Detect, based on conventions, what log level this is -func (s *stdlogAdapter) pickLevel(str string) (Level, string) { - switch { - case strings.HasPrefix(str, "[DEBUG]"): - return Debug, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[TRACE]"): - return Trace, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[INFO]"): - return Info, strings.TrimSpace(str[6:]) - case strings.HasPrefix(str, "[WARN]"): - return Warn, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[ERROR]"): - return Error, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[ERR]"): - return Error, strings.TrimSpace(str[5:]) - default: - return Info, str - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE deleted file mode 100644 index 82b4de9..0000000 --- a/vendor/github.com/hashicorp/go-multierror/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. 
“Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile deleted file mode 100644 index b97cd6e..0000000 --- a/vendor/github.com/hashicorp/go-multierror/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code. -test: generate - @echo "==> Running tests..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} - -# testrace runs the race checker -testrace: generate - @echo "==> Running tests (race)..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -race ${TESTARGS} - -# updatedeps installs all the dependencies needed to run and build. -updatedeps: - @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" - -# generate runs `go generate` to build the dynamically generated source files. -generate: - @echo "==> Generating..." - @find . -type f -name '.DS_Store' -delete - @go list ./... \ - | grep -v "/vendor/" \ - | xargs -n1 go generate - -.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md deleted file mode 100644 index ead5830..0000000 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# go-multierror - -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror - -`go-multierror` is a package for Go that provides a mechanism for -representing a list of `error` values as a single `error`. - -This allows a function in Go to return an `error` that might actually -be a list of errors. If the caller knows this, they can unwrap the -list and access the errors. If the caller doesn't know, the error -formats to a nice human-readable format. 
- -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. - -## Installation and Docs - -Install using `go get github.com/hashicorp/go-multierror`. - -Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror - -## Usage - -go-multierror is easy to use and purposely built to be unobtrusive in -existing Go applications/libraries that may not be aware of it. - -**Building a list of errors** - -The `Append` function is used to create a list of errors. This function -behaves a lot like the Go built-in `append` function: it doesn't matter -if the first argument is nil, a `multierror.Error`, or any other `error`, -the function behaves as you would expect. - -```go -var result error - -if err := step1(); err != nil { - result = multierror.Append(result, err) -} -if err := step2(); err != nil { - result = multierror.Append(result, err) -} - -return result -``` - -**Customizing the formatting of the errors** - -By specifying a custom `ErrorFormat`, you can customize the format -of the `Error() string` function: - -```go -var result *multierror.Error - -// ... accumulate errors here, maybe using Append - -if result != nil { - result.ErrorFormat = func([]error) string { - return "errors!" - } -} -``` - -**Accessing the list of errors** - -`multierror.Error` implements `error` so if the caller doesn't know about -multierror, it will work just fine. But if you're aware a multierror might -be returned, you can use type switches to access the list of errors: - -```go -if err := something(); err != nil { - if merr, ok := err.(*multierror.Error); ok { - // Use merr.Errors - } -} -``` - -**Returning a multierror only if there are errors** - -If you build a `multierror.Error`, you can use the `ErrorOrNil` function -to return an `error` implementation only if there are errors to return: - -```go -var result *multierror.Error - -// ... accumulate errors here - -// Return the `error` only if errors were added to the multierror, otherwise -// return nil since there are no errors. -return result.ErrorOrNil() -``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go deleted file mode 100644 index 775b6e7..0000000 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ /dev/null @@ -1,41 +0,0 @@ -package multierror - -// Append is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. If any of the errs are multierror.Error, they will be flattened -// one level into err. -func Append(err error, errs ...error) *Error { - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Go through each error and flatten - for _, e := range errs { - switch e := e.(type) { - case *Error: - if e != nil { - err.Errors = append(err.Errors, e.Errors...) - } - default: - if e != nil { - err.Errors = append(err.Errors, e) - } - } - } - - return err - default: - newErrs := make([]error, 0, len(errs)+1) - if err != nil { - newErrs = append(newErrs, err) - } - newErrs = append(newErrs, errs...) - - return Append(&Error{}, newErrs...)
- } -} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go deleted file mode 100644 index aab8e9a..0000000 --- a/vendor/github.com/hashicorp/go-multierror/flatten.go +++ /dev/null @@ -1,26 +0,0 @@ -package multierror - -// Flatten flattens the given error, merging any *Errors together into -// a single *Error. -func Flatten(err error) error { - // If it isn't an *Error, just return the error as-is - if _, ok := err.(*Error); !ok { - return err - } - - // Otherwise, make the result and flatten away! - flatErr := new(Error) - flatten(err, flatErr) - return flatErr -} - -func flatten(err error, flatErr *Error) { - switch err := err.(type) { - case *Error: - for _, e := range err.Errors { - flatten(e, flatErr) - } - default: - flatErr.Errors = append(flatErr.Errors, err) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go deleted file mode 100644 index 6c7a3cc..0000000 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ /dev/null @@ -1,27 +0,0 @@ -package multierror - -import ( - "fmt" - "strings" -) - -// ErrorFormatFunc is a function callback that is called by Error to -// turn the list of errors into a string. -type ErrorFormatFunc func([]error) string - -// ListFormatFunc is a basic formatter that outputs the number of errors -// that occurred along with a bullet point list of the errors. -func ListFormatFunc(es []error) string { - if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\n* %s", es[0]) - } - - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d errors occurred:\n\n%s", - len(es), strings.Join(points, "\n")) -} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go deleted file mode 100644 index 89b1422..0000000 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ /dev/null @@ -1,51 +0,0 @@ -package multierror - -import ( - "fmt" -) - -// Error is an error type to track multiple errors. This is used to -// accumulate errors in cases and return them as a single "error". -type Error struct { - Errors []error - ErrorFormat ErrorFormatFunc -} - -func (e *Error) Error() string { - fn := e.ErrorFormat - if fn == nil { - fn = ListFormatFunc - } - - return fn(e.Errors) -} - -// ErrorOrNil returns an error interface if this Error represents -// a list of errors, or returns nil if the list of errors is empty. This -// function is useful at the end of accumulation to make sure that the value -// returned represents the existence of errors. -func (e *Error) ErrorOrNil() error { - if e == nil { - return nil - } - if len(e.Errors) == 0 { - return nil - } - - return e -} - -func (e *Error) GoString() string { - return fmt.Sprintf("*%#v", *e) -} - -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. -// -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. 
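-//
-// For example (an illustrative sketch), this is what lets a caller search
-// the wrapped errors with the errwrap helpers:
-//
-//	if errwrap.Contains(err, "file not found") {
-//		// at least one wrapped error carries that message
-//	}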
-func (e *Error) WrappedErrors() []error { - return e.Errors -} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go deleted file mode 100644 index 5c477ab..0000000 --- a/vendor/github.com/hashicorp/go-multierror/prefix.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -// Prefix is a helper function that will prefix some text -// to the given error. If the error is a multierror.Error, then -// it will be prefixed to each wrapped error. -// -// This is useful to use when appending multiple multierrors -// together in order to give better scoping. -func Prefix(err error, prefix string) error { - if err == nil { - return nil - } - - format := fmt.Sprintf("%s {{err}}", prefix) - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Wrap each of the errors - for i, e := range err.Errors { - err.Errors[i] = errwrap.Wrapf(format, e) - } - - return err - default: - return errwrap.Wrapf(format, err) - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE deleted file mode 100644 index 82b4de9..0000000 --- a/vendor/github.com/hashicorp/go-plugin/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md deleted file mode 100644 index 78d354e..0000000 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ /dev/null @@ -1,168 +0,0 @@ -# Go Plugin System over RPC - -`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system -that has been in use by HashiCorp tooling for over 4 years. While initially -created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). - -While the plugin system is over RPC, it is currently only designed to work -over a local [reliable] network. Plugins over a real network are not supported -and will lead to unexpected behavior. - -This plugin system has been used on millions of machines across many different -projects and has proven to be battle hardened and ready for production use. - -## Features - -The HashiCorp plugin system supports a number of features: - -**Plugins are Go interface implementations.** This makes writing and consuming -plugins feel very natural. To a plugin author: you just implement an -interface as if it were going to run in the same process. For a plugin user: -you just use and call functions on an interface as if it were in the same -process. This plugin system handles the communication in between. - -**Cross-language support.** Plugins can be written (and consumed) by -almost every major language. This library supports serving plugins via -[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written -in any language. - -**Complex arguments and return values are supported.** This library -provides APIs for handling complex arguments and return values such -as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library -(`MuxBroker`) for creating new connections between the client/server to -serve additional interfaces or transfer raw data. - -**Bidirectional communication.** Because the plugin system supports -complex arguments, the host process can send it interface implementations -and the plugin can call back into the host process. - -**Built-in Logging.** Any plugins that use the `log` standard library -will have log data automatically sent to the host process. The host -process will mirror this output prefixed with the path to the plugin -binary. This makes debugging with plugins simple. If the host system -uses [hclog](https://github.com/hashicorp/go-hclog) then the log data -will be structured. If the plugin also uses hclog, logs from the plugin -will be sent to the host hclog and be structured. - -**Protocol Versioning.** A very basic "protocol version" is supported that -can be incremented to invalidate any previous plugins. This is useful when -interface signatures are changing, protocol level changes are necessary, -etc. When a protocol version is incompatible, a human friendly error -message is shown to the end user. - -**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue -to use stdout/stderr as usual and the output will get mirrored back to -the host process. The host process can control what `io.Writer` these -streams go to to prevent this from happening. 
- -**TTY Preservation.** Plugin subprocesses are connected to the identical -stdin file descriptor as the host process, allowing software that requires -a TTY to work. For example, a plugin can execute `ssh` and even though there -are multiple subprocesses and RPC happening, it will look and act perfectly -to the end user. - -**Host upgrade while a plugin is running.** Plugins can be "reattached" -so that the host process can be upgraded while the plugin is still running. -This requires the host/plugin to know this is possible and daemonize -properly. `NewClient` takes a `ReattachConfig` to determine if and how to -reattach. - -**Cryptographically Secure Plugins.** Plugins can be verified with an expected -checksum and RPC communications can be configured to use TLS. The host process -must be properly secured to protect this configuration. - -## Architecture - -The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single -connection is made between any plugin and the host process. For net/rpc-based -plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. For gRPC-based plugins, -the HTTP2 protocol handles multiplexing. - -This architecture has a number of benefits: - - * Plugins can't crash your host process: A panic in a plugin doesn't - panic the plugin user. - - * Plugins are very easy to write: just write a Go application and `go build`. - Or use any other language to write a gRPC server with a tiny amount of - boilerplate to support go-plugin. - - * Plugins are very easy to install: just put the binary in a location where - the host will find it (depends on the host but this library also provides - helpers), and the plugin host handles the rest. - - * Plugins can be relatively secure: The plugin only has access to the - interfaces and args given to it, not to the entire memory space of the - process. Additionally, go-plugin can communicate with the plugin over - TLS. - -## Usage - -To use the plugin system, you must take the following steps. These are -high-level steps that must be done. Examples are available in the -`examples/` directory. - - 1. Choose the interface(s) you want to expose for plugins. - - 2. For each interface, implement an implementation of that interface - that communicates over a `net/rpc` connection or over a - [gRPC](http://www.grpc.io) connection or both. You'll have to implement - both a client and server implementation. - - 3. Create a `Plugin` implementation that knows how to create the RPC - client/server for a given plugin type. - - 4. Plugin authors call `plugin.Serve` to serve a plugin from the - `main` function. - - 5. Plugin users use `plugin.Client` to launch a subprocess and request - an interface implementation over RPC. - -That's it! In practice, step 2 is the most tedious and time consuming step. -Even so, it isn't very difficult and you can see examples in the `examples/` -directory as well as throughout our various open source projects. - -For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). - -## Roadmap - -Our plugin system is constantly evolving. As we use the plugin system for -new projects or for new features in existing projects, we constantly find -improvements we can make. - -At this point in time, the roadmap for the plugin system is: - -**Semantic Versioning.** Plugins will be able to implement a semantic version.
-This plugin system will give host processes a system for constraining -versions. This is in addition to the protocol versioning already present -which is more for larger underlying changes. - -**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) -to support automatic download + install of plugins. Paired with cryptographically -secure plugins (above), we can make this a safe operation for an amazing -user experience. - -## What About Shared Libraries? - -When we started using plugins (late 2012, early 2013), plugins over RPC -were the only option since Go didn't support dynamic library loading. Today, -Go still doesn't support dynamic library loading, but they do intend to. -Since 2012, our plugin system has stabilized from millions of users using it, -and has many benefits we've come to value greatly. - -For example, we intend to use this plugin system in -[Vault](https://www.vaultproject.io), and dynamic library loading will -simply never be acceptable in Vault for security reasons. That is an extreme -example, but we believe our library system has more upsides than downsides -over dynamic library loading and since we've had it built and tested for years, -we'll likely continue to use it. - -Shared libraries have one major advantage over our system which is much -higher performance. In real world scenarios across our various tools, -we've never required any more performance out of our plugin system and it -has seen very high throughput, so this isn't a concern for us at the moment. - diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go deleted file mode 100644 index b912826..0000000 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ /dev/null @@ -1,772 +0,0 @@ -package plugin - -import ( - "bufio" - "crypto/subtle" - "crypto/tls" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unicode" - - hclog "github.com/hashicorp/go-hclog" -) - -// If this is 1, then we've called CleanupClients. This can be used -// by plugin RPC implementations to change error behavior since you -// can expect network connection errors at this point. This should be -// read by using sync/atomic. -var Killed uint32 = 0 - -// This is a slice of the "managed" clients which are cleaned up when -// calling Cleanup -var managedClients = make([]*Client, 0, 5) -var managedClientsLock sync.Mutex - -// Error types -var ( - // ErrProcessNotFound is returned when a client is instantiated to - // reattach to an existing process and it isn't found. - ErrProcessNotFound = errors.New("Reattachment process not found") - - // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match - // the one provided in the SecureConfig. - ErrChecksumsDoNotMatch = errors.New("checksums did not match") - - // ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the - // SecureConfig. - ErrSecureConfigNoChecksum = errors.New("no checksum provided") - - // ErrSecureConfigNoHash is returned when a nil Hash object is provided to the - // SecureConfig. - ErrSecureConfigNoHash = errors.New("no hash implementation provided") - - // ErrSecureConfigAndReattach is returned when both Reattach and - // SecureConfig are set. - ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") -) - -// Client handles the lifecycle of a plugin application.
It launches -// plugins, connects to them, dispenses interface implementations, and handles -// killing the process. -// -// Plugin hosts should use one Client for each plugin executable. To -// dispense a plugin type, use the `Client.Client` function, and then -// call `Dispense`. This awkward API is mostly historical but is used to split -// the client that deals with subprocess management and the client that -// does RPC management. -// -// See NewClient and ClientConfig for using a Client. -type Client struct { - config *ClientConfig - exited bool - doneLogging chan struct{} - l sync.Mutex - address net.Addr - process *os.Process - client ClientProtocol - protocol Protocol - logger hclog.Logger -} - -// ClientConfig is the configuration used to initialize a new -// plugin client. After being used to initialize a plugin client, -// that configuration must not be modified again. -type ClientConfig struct { - // HandshakeConfig is the configuration that must match servers. - HandshakeConfig - - // Plugins are the plugins that can be consumed. - Plugins map[string]Plugin - - // One of the following must be set, but not both. - // - // Cmd is the unstarted subprocess for starting the plugin. If this is - // set, then the Client starts the plugin process on its own and connects - // to it. - // - // Reattach is configuration for reattaching to an existing plugin process - // that is already running. This isn't common. - Cmd *exec.Cmd - Reattach *ReattachConfig - - // SecureConfig is configuration for verifying the integrity of the - // executable. It can not be used with Reattach. - SecureConfig *SecureConfig - - // TLSConfig is used to enable TLS on the RPC client. - TLSConfig *tls.Config - - // Managed represents if the client should be managed by the - // plugin package or not. If true, then by calling CleanupClients, - // it will automatically be cleaned up. Otherwise, the client - // user is fully responsible for making sure to Kill all plugin - // clients. By default the client is _not_ managed. - Managed bool - - // The minimum and maximum port to use for communicating with - // the subprocess. If not set, this defaults to 10,000 and 25,000 - // respectively. - MinPort, MaxPort uint - - // StartTimeout is the timeout to wait for the plugin to say it - // has started successfully. - StartTimeout time.Duration - - // If non-nil, then the stderr of the client will be written to here - // (as well as the log). This is the original os.Stderr of the subprocess. - // This isn't the output of synced stderr. - Stderr io.Writer - - // SyncStdout, SyncStderr can be set to override the - // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. - // - // If the default values (nil) are used, then this package will not - // sync any of these streams; their output is discarded. - SyncStdout io.Writer - SyncStderr io.Writer - - // AllowedProtocols is a list of allowed protocols. If this isn't set, - // then only netrpc is allowed. This is so that older go-plugin systems - // can show friendly errors if they see a plugin with an unknown - // protocol. - // - // By setting this, you can cause an error immediately on plugin start - // if an unsupported protocol is used with a good error message. - // - // If this isn't set at all (nil value), then only net/rpc is accepted. - // This is done for legacy reasons. You must explicitly opt-in to - // new protocols.
- AllowedProtocols []Protocol - - // Logger is the logger that the client will use. If none is provided, - // it will default to hclog's default logger. - Logger hclog.Logger -} - -// ReattachConfig is used to configure a client to reattach to an -// already-running plugin process. You can retrieve this information by -// calling ReattachConfig on Client. -type ReattachConfig struct { - Protocol Protocol - Addr net.Addr - Pid int -} - -// SecureConfig is used to configure a client to verify the integrity of an -// executable before running. It does this by verifying the checksum is -// as expected. Hash is used to specify the hashing method to use when checksumming -// the file. The configuration is verified by the client by calling the -// SecureConfig.Check() function. -// -// The host process should ensure the checksum was provided by a trusted and -// authoritative source. The binary should be installed in such a way that it -// can not be modified by an unauthorized user between the time of this check -// and the time of execution. -type SecureConfig struct { - Checksum []byte - Hash hash.Hash -} - -// Check takes the filepath to an executable and returns true if the checksum of -// the file matches the checksum provided in the SecureConfig. -func (s *SecureConfig) Check(filePath string) (bool, error) { - if len(s.Checksum) == 0 { - return false, ErrSecureConfigNoChecksum - } - - if s.Hash == nil { - return false, ErrSecureConfigNoHash - } - - file, err := os.Open(filePath) - if err != nil { - return false, err - } - defer file.Close() - - _, err = io.Copy(s.Hash, file) - if err != nil { - return false, err - } - - sum := s.Hash.Sum(nil) - - return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil -} - -// This makes sure all the managed subprocesses are killed and properly -// logged. This should be called before the parent process running the -// plugins exits. -// -// This must only be called _once_. -func CleanupClients() { - // Set the killed to true so that we don't get unexpected panics - atomic.StoreUint32(&Killed, 1) - - // Kill all the managed clients in parallel and use a WaitGroup - // to wait for them all to finish up. - var wg sync.WaitGroup - managedClientsLock.Lock() - for _, client := range managedClients { - wg.Add(1) - - go func(client *Client) { - client.Kill() - wg.Done() - }(client) - } - managedClientsLock.Unlock() - - log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...") - wg.Wait() -} - -// Creates a new plugin client which manages the lifecycle of an external -// plugin and gets the address for the RPC connection. -// -// The client must be cleaned up at some point by calling Kill(). If -// the client is a managed client (created with NewManagedClient) you -// can just call CleanupClients at the end of your program and they will -// be properly cleaned.
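-//
-// A typical setup is sketched below; handshake, pluginMap, and the
-// binary path are hypothetical placeholders:
-//
-//	client := plugin.NewClient(&plugin.ClientConfig{
-//		HandshakeConfig: handshake, // must match the plugin's server
-//		Plugins:         pluginMap, // plugin name -> Plugin implementation
-//		Cmd:             exec.Command("./my-plugin"),
-//	})
-//	defer client.Kill()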
-func NewClient(config *ClientConfig) (c *Client) { - if config.MinPort == 0 && config.MaxPort == 0 { - config.MinPort = 10000 - config.MaxPort = 25000 - } - - if config.StartTimeout == 0 { - config.StartTimeout = 1 * time.Minute - } - - if config.Stderr == nil { - config.Stderr = ioutil.Discard - } - - if config.SyncStdout == nil { - config.SyncStdout = ioutil.Discard - } - if config.SyncStderr == nil { - config.SyncStderr = ioutil.Discard - } - - if config.AllowedProtocols == nil { - config.AllowedProtocols = []Protocol{ProtocolNetRPC} - } - - if config.Logger == nil { - config.Logger = hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: hclog.Trace, - Name: "plugin", - }) - } - - c = &Client{ - config: config, - logger: config.Logger, - } - if config.Managed { - managedClientsLock.Lock() - managedClients = append(managedClients, c) - managedClientsLock.Unlock() - } - - return -} - -// Client returns the protocol client for this connection. -// -// Subsequent calls to this will return the same client. -func (c *Client) Client() (ClientProtocol, error) { - _, err := c.Start() - if err != nil { - return nil, err - } - - c.l.Lock() - defer c.l.Unlock() - - if c.client != nil { - return c.client, nil - } - - switch c.protocol { - case ProtocolNetRPC: - c.client, err = newRPCClient(c) - - case ProtocolGRPC: - c.client, err = newGRPCClient(c) - - default: - return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) - } - - if err != nil { - c.client = nil - return nil, err - } - - return c.client, nil -} - -// Tells whether or not the underlying process has exited. -func (c *Client) Exited() bool { - c.l.Lock() - defer c.l.Unlock() - return c.exited -} - -// End the executing subprocess (if it is running) and perform any cleanup -// tasks necessary such as capturing any remaining logs and so on. -// -// This method blocks until the process successfully exits. -// -// This method can safely be called multiple times. -func (c *Client) Kill() { - // Grab a lock to read some private fields. - c.l.Lock() - process := c.process - addr := c.address - doneCh := c.doneLogging - c.l.Unlock() - - // If there is no process, we never started anything. Nothing to kill. - if process == nil { - return - } - - // We need to check for address here. It is possible that the plugin - // started (process != nil) but has no address (addr == nil) if the - // plugin failed at startup. If we do have an address, we need to close - // the plugin net connections. - graceful := false - if addr != nil { - // Close the client to cleanly exit the process. - client, err := c.Client() - if err == nil { - err = client.Close() - - // If there is no error, then we attempt to wait for a graceful - // exit. If there was an error, we assume that graceful cleanup - // won't happen and just force kill. - graceful = err == nil - if err != nil { - // If there was an error just log it. We're going to force - // kill in a moment anyways. - c.logger.Warn("error closing client during Kill", "err", err) - } - } - } - - // If we're attempting a graceful exit, then we wait for a short period - // of time to allow that to happen. To wait for this we just wait on the - // doneCh which would be closed if the process exits. 
- if graceful { - select { - case <-doneCh: - return - case <-time.After(250 * time.Millisecond): - } - } - - // If graceful exiting failed, just kill it - process.Kill() - - // Wait for the client to finish logging so we have a complete log - <-doneCh -} - -// Starts the underlying subprocess, communicating with it to negotiate -// a port for RPC connections, and returning the address to connect via RPC. -// -// This method is safe to call multiple times. Subsequent calls have no effect. -// Once a client has been started once, it cannot be started again, even if -// it was killed. -func (c *Client) Start() (addr net.Addr, err error) { - c.l.Lock() - defer c.l.Unlock() - - if c.address != nil { - return c.address, nil - } - - // If one of cmd or reattach isn't set, then it is an error. We wrap - // this in a {} for scoping reasons, in the hope that the escape - // analysis will pop the stack here. - { - cmdSet := c.config.Cmd != nil - attachSet := c.config.Reattach != nil - secureSet := c.config.SecureConfig != nil - if cmdSet == attachSet { - return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") - } - - if secureSet && attachSet { - return nil, ErrSecureConfigAndReattach - } - } - - // Create the logging channel for when we kill - c.doneLogging = make(chan struct{}) - - if c.config.Reattach != nil { - // Verify the process still exists. If not, then it is an error - p, err := os.FindProcess(c.config.Reattach.Pid) - if err != nil { - return nil, err - } - - // Attempt to connect to the addr since on Unix systems FindProcess - // doesn't actually return an error if it can't find the process. - conn, err := net.Dial( - c.config.Reattach.Addr.Network(), - c.config.Reattach.Addr.String()) - if err != nil { - p.Kill() - return nil, ErrProcessNotFound - } - conn.Close() - - // Goroutine to mark exit status - go func(pid int) { - // Wait for the process to die - pidWait(pid) - - // Log so we can see it - c.logger.Debug("reattached plugin process exited") - - // Mark it - c.l.Lock() - defer c.l.Unlock() - c.exited = true - - // Close the logging channel since that doesn't work on reattach - close(c.doneLogging) - }(p.Pid) - - // Set the address and process - c.address = c.config.Reattach.Addr - c.process = p - c.protocol = c.config.Reattach.Protocol - if c.protocol == "" { - // Default the protocol to net/rpc for backwards compatibility - c.protocol = ProtocolNetRPC - } - - return c.address, nil - } - - env := []string{ - fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), - fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), - fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), - } - - stdout_r, stdout_w := io.Pipe() - stderr_r, stderr_w := io.Pipe() - - cmd := c.config.Cmd - cmd.Env = append(cmd.Env, os.Environ()...) - cmd.Env = append(cmd.Env, env...)
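-
-	// The magic cookie and the port range are handed to the plugin through
-	// its environment. The plugin answers with a single handshake line on
-	// stdout (CORE-PROTOCOL|APP-PROTOCOL|NETWORK|ADDRESS|PROTOCOL), which
-	// is parsed below.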
- cmd.Stdin = os.Stdin - cmd.Stderr = stderr_w - cmd.Stdout = stdout_w - - if c.config.SecureConfig != nil { - if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { - return nil, fmt.Errorf("error verifying checksum: %s", err) - } else if !ok { - return nil, ErrChecksumsDoNotMatch - } - } - - c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) - err = cmd.Start() - if err != nil { - return - } - - // Set the process - c.process = cmd.Process - - // Make sure the command is properly cleaned up if there is an error - defer func() { - r := recover() - - if err != nil || r != nil { - cmd.Process.Kill() - } - - if r != nil { - panic(r) - } - }() - - // Start goroutine to wait for process to exit - exitCh := make(chan struct{}) - go func() { - // Make sure we close the write end of our stderr/stdout so - // that the readers send EOF properly. - defer stderr_w.Close() - defer stdout_w.Close() - - // Wait for the command to end. - cmd.Wait() - - // Log and make sure to flush the logs right away - c.logger.Debug("plugin process exited", "path", cmd.Path) - os.Stderr.Sync() - - // Mark that we exited - close(exitCh) - - // Set that we exited, which takes a lock - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }() - - // Start goroutine that logs the stderr - go c.logStderr(stderr_r) - - // Start a goroutine that is going to be reading the lines - // out of stdout - linesCh := make(chan []byte) - go func() { - defer close(linesCh) - - buf := bufio.NewReader(stdout_r) - for { - line, err := buf.ReadBytes('\n') - if line != nil { - linesCh <- line - } - - if err == io.EOF { - return - } - } - }() - - // Make sure after we exit we read the lines from stdout forever - // so they don't block since it is an io.Pipe - defer func() { - go func() { - for _ = range linesCh { - } - }() - }() - - // Some channels for the next step - timeout := time.After(c.config.StartTimeout) - - // Start looking for the address - c.logger.Debug("waiting for RPC address", "path", cmd.Path) - select { - case <-timeout: - err = errors.New("timeout while waiting for plugin to start") - case <-exitCh: - err = errors.New("plugin exited before we could connect") - case lineBytes := <-linesCh: - // Trim the line and split by "|" in order to get the parts of - // the output. - line := strings.TrimSpace(string(lineBytes)) - parts := strings.SplitN(line, "|", 6) - if len(parts) < 4 { - err = fmt.Errorf( - "Unrecognized remote plugin message: %s\n\n"+ - "This usually means that the plugin is either invalid or simply\n"+ - "needs to be recompiled to support the latest protocol.", line) - return - } - - // Check the core protocol. Wrapped in a {} for scoping. - { - var coreProtocol int64 - coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) - if err != nil { - err = fmt.Errorf("Error parsing core protocol version: %s", err) - return - } - - if int(coreProtocol) != CoreProtocolVersion { - err = fmt.Errorf("Incompatible core API version with plugin. "+ - "Plugin version: %s, Ours: %d\n\n"+ - "To fix this, the plugin usually only needs to be recompiled.\n"+ - "Please report this to the plugin author.", parts[0], CoreProtocolVersion) - return - } - } - - // Parse the protocol version - var protocol int64 - protocol, err = strconv.ParseInt(parts[1], 10, 0) - if err != nil { - err = fmt.Errorf("Error parsing protocol version: %s", err) - return - } - - // Test the API version - if uint(protocol) != c.config.ProtocolVersion { - err = fmt.Errorf("Incompatible API version with plugin. 
"+ - "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) - return - } - - switch parts[2] { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", parts[3]) - case "unix": - addr, err = net.ResolveUnixAddr("unix", parts[3]) - default: - err = fmt.Errorf("Unknown address type: %s", parts[3]) - } - - // If we have a server type, then record that. We default to net/rpc - // for backwards compatibility. - c.protocol = ProtocolNetRPC - if len(parts) >= 5 { - c.protocol = Protocol(parts[4]) - } - - found := false - for _, p := range c.config.AllowedProtocols { - if p == c.protocol { - found = true - break - } - } - if !found { - err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", - c.protocol, c.config.AllowedProtocols) - return - } - - } - - c.address = addr - return -} - -// ReattachConfig returns the information that must be provided to NewClient -// to reattach to the plugin process that this client started. This is -// useful for plugins that detach from their parent process. -// -// If this returns nil then the process hasn't been started yet. Please -// call Start or Client before calling this. -func (c *Client) ReattachConfig() *ReattachConfig { - c.l.Lock() - defer c.l.Unlock() - - if c.address == nil { - return nil - } - - if c.config.Cmd != nil && c.config.Cmd.Process == nil { - return nil - } - - // If we connected via reattach, just return the information as-is - if c.config.Reattach != nil { - return c.config.Reattach - } - - return &ReattachConfig{ - Protocol: c.protocol, - Addr: c.address, - Pid: c.config.Cmd.Process.Pid, - } -} - -// Protocol returns the protocol of server on the remote end. This will -// start the plugin process if it isn't already started. Errors from -// starting the plugin are surpressed and ProtocolInvalid is returned. It -// is recommended you call Start explicitly before calling Protocol to ensure -// no errors occur. -func (c *Client) Protocol() Protocol { - _, err := c.Start() - if err != nil { - return ProtocolInvalid - } - - return c.protocol -} - -// dialer is compatible with grpc.WithDialer and creates the connection -// to the plugin. -func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - // Connect to the client - conn, err := net.Dial(c.address.Network(), c.address.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - // If we have a TLS config we wrap our connection. We only do this - // for net/rpc since gRPC uses its own mechanism for TLS. - if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - return conn, nil -} - -func (c *Client) logStderr(r io.Reader) { - bufR := bufio.NewReader(r) - for { - line, err := bufR.ReadString('\n') - if line != "" { - c.config.Stderr.Write([]byte(line)) - line = strings.TrimRightFunc(line, unicode.IsSpace) - - l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) - - entry, err := parseJSON(line) - // If output is not JSON format, print directly to Debug - if err != nil { - l.Debug(line) - } else { - out := flattenKVPairs(entry.KVPairs) - - l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat)) - switch hclog.LevelFromString(entry.Level) { - case hclog.Trace: - l.Trace(entry.Message, out...) - case hclog.Debug: - l.Debug(entry.Message, out...) - case hclog.Info: - l.Info(entry.Message, out...) 
- case hclog.Warn: - l.Warn(entry.Message, out...) - case hclog.Error: - l.Error(entry.Message, out...) - } - } - } - - if err == io.EOF { - break - } - } - - // Flag that we've completed logging for others - close(c.doneLogging) -} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go deleted file mode 100644 index d22c566..0000000 --- a/vendor/github.com/hashicorp/go-plugin/discover.go +++ /dev/null @@ -1,28 +0,0 @@ -package plugin - -import ( - "path/filepath" -) - -// Discover discovers plugins that are in a given directory. -// -// The directory doesn't need to be absolute. For example, "." will work fine. -// -// This currently assumes any file matching the glob is a plugin. -// In the future this may be smarter about checking that a file is -// executable and so on. -// -// TODO: test -func Discover(glob, dir string) ([]string, error) { - var err error - - // Make the directory absolute if it isn't already - if !filepath.IsAbs(dir) { - dir, err = filepath.Abs(dir) - if err != nil { - return nil, err - } - } - - return filepath.Glob(filepath.Join(dir, glob)) -} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go deleted file mode 100644 index 22a7baa..0000000 --- a/vendor/github.com/hashicorp/go-plugin/error.go +++ /dev/null @@ -1,24 +0,0 @@ -package plugin - -// This is a type that wraps error types so that they can be messaged -// across RPC channels. Since "error" is an interface, we can't always -// gob-encode the underlying structure. This is a valid error interface -// implementer that we will push across. -type BasicError struct { - Message string -} - -// NewBasicError is used to create a BasicError. -// -// err is allowed to be nil. -func NewBasicError(err error) *BasicError { - if err == nil { - return nil - } - - return &BasicError{err.Error()} -} - -func (e *BasicError) Error() string { - return e.Message -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go deleted file mode 100644 index 3bcf95e..0000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ /dev/null @@ -1,83 +0,0 @@ -package plugin - -import ( - "fmt" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health/grpc_health_v1" -) - -// newGRPCClient creates a new GRPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newGRPCClient(c *Client) (*GRPCClient, error) { - // Build dialing options. - opts := make([]grpc.DialOption, 0, 5) - - // We use a custom dialer so that we can connect over unix domain sockets - opts = append(opts, grpc.WithDialer(c.dialer)) - - // go-plugin expects to block the connection - opts = append(opts, grpc.WithBlock()) - - // Fail right away - opts = append(opts, grpc.FailOnNonTempDialError(true)) - - // If we have no TLS configuration set, we need to explicitly tell grpc - // that we're connecting with an insecure connection. - if c.config.TLSConfig == nil { - opts = append(opts, grpc.WithInsecure()) - } else { - opts = append(opts, grpc.WithTransportCredentials( - credentials.NewTLS(c.config.TLSConfig))) - } - - // Connect. Note the first parameter is unused because we use a custom - // dialer that has the state to see the address. - conn, err := grpc.Dial("unused", opts...) 
- if err != nil { - return nil, err - } - - return &GRPCClient{ - Conn: conn, - Plugins: c.config.Plugins, - }, nil -} - -// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. -type GRPCClient struct { - Conn *grpc.ClientConn - Plugins map[string]Plugin -} - -// ClientProtocol impl. -func (c *GRPCClient) Close() error { - return c.Conn.Close() -} - -// ClientProtocol impl. -func (c *GRPCClient) Dispense(name string) (interface{}, error) { - raw, ok := c.Plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - p, ok := raw.(GRPCPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) - } - - return p.GRPCClient(c.Conn) -} - -// ClientProtocol impl. -func (c *GRPCClient) Ping() error { - client := grpc_health_v1.NewHealthClient(c.Conn) - _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ - Service: GRPCServiceName, - }) - - return err -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go deleted file mode 100644 index 177a0cd..0000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ /dev/null @@ -1,115 +0,0 @@ -package plugin - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -// GRPCServiceName is the name of the service that the health check should -// return as passing. -const GRPCServiceName = "plugin" - -// DefaultGRPCServer can be used with the "GRPCServer" field for Server -// as a default factory method to create a gRPC server with no extra options. -func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { - return grpc.NewServer(opts...) -} - -// GRPCServer is a ServerType implementation that serves plugins over -// gRPC. This allows plugins to easily be written for other languages. -// -// The GRPCServer outputs a custom configuration as a base64-encoded -// JSON structure represented by the GRPCServerConfig config structure. -type GRPCServer struct { - // Plugins are the list of plugins to serve. - Plugins map[string]Plugin - - // Server is the actual server that will accept connections. This - // will be used for plugin registration as well. - Server func([]grpc.ServerOption) *grpc.Server - - // TLS should be the TLS configuration if available. If this is nil, - // the connection will not have transport security. - TLS *tls.Config - - // DoneCh is the channel that is closed when this server has exited. - DoneCh chan struct{} - - // Stdout/StderrLis are the readers for stdout/stderr that will be copied - // to the stdout/stderr connection that is output. - Stdout io.Reader - Stderr io.Reader - - config GRPCServerConfig - server *grpc.Server -} - -// ServerProtocol impl. -func (s *GRPCServer) Init() error { - // Create our server - var opts []grpc.ServerOption - if s.TLS != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) - } - s.server = s.Server(opts) - - // Register the health service - healthCheck := health.NewServer() - healthCheck.SetServingStatus( - GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) - grpc_health_v1.RegisterHealthServer(s.server, healthCheck) - - // Register all our plugins onto the gRPC server. 
-	for k, raw := range s.Plugins {
-		p, ok := raw.(GRPCPlugin)
-		if !ok {
-			return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
-		}
-
-		if err := p.GRPCServer(s.server); err != nil {
-			return fmt.Errorf("error registering %q: %s", k, err)
-		}
-	}
-
-	return nil
-}
-
-// Config is the GRPCServerConfig encoded as JSON then base64.
-func (s *GRPCServer) Config() string {
-	// Create a buffer that will contain our final contents
-	var buf bytes.Buffer
-
-	// Wrap the base64 encoding with JSON encoding.
-	if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
-		// We panic since this shouldn't happen under any scenario. We
-		// carefully control the structure being encoded here and it should
-		// always be successful.
-		panic(err)
-	}
-
-	return buf.String()
-}
-
-func (s *GRPCServer) Serve(lis net.Listener) {
-	// Start serving in a goroutine
-	go s.server.Serve(lis)
-
-	// Wait until graceful completion
-	<-s.DoneCh
-}
-
-// GRPCServerConfig is the extra configuration passed along for consumers
-// to facilitate using GRPC plugins.
-type GRPCServerConfig struct {
-	StdoutAddr string `json:"stdout_addr"`
-	StderrAddr string `json:"stderr_addr"`
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
deleted file mode 100644
index 2996c14..0000000
--- a/vendor/github.com/hashicorp/go-plugin/log_entry.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package plugin
-
-import (
-	"encoding/json"
-	"time"
-)
-
-// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
-type logEntry struct {
-	Message   string        `json:"@message"`
-	Level     string        `json:"@level"`
-	Timestamp time.Time     `json:"timestamp"`
-	KVPairs   []*logEntryKV `json:"kv_pairs"`
-}
-
-// logEntryKV is a key value pair within the Output payload
-type logEntryKV struct {
-	Key   string      `json:"key"`
-	Value interface{} `json:"value"`
-}
-
-// flattenKVPairs is used to flatten KVPair slice into []interface{}
-// for hclog consumption.
-func flattenKVPairs(kvs []*logEntryKV) []interface{} {
-	var result []interface{}
-	for _, kv := range kvs {
-		result = append(result, kv.Key)
-		result = append(result, kv.Value)
-	}
-
-	return result
-}
-
-// parseJSON handles parsing JSON output
-func parseJSON(input string) (*logEntry, error) {
-	var raw map[string]interface{}
-	entry := &logEntry{}
-
-	err := json.Unmarshal([]byte(input), &raw)
-	if err != nil {
-		return nil, err
-	}
-
-	// Parse hclog-specific objects
-	if v, ok := raw["@message"]; ok {
-		entry.Message = v.(string)
-		delete(raw, "@message")
-	}
-
-	if v, ok := raw["@level"]; ok {
-		entry.Level = v.(string)
-		delete(raw, "@level")
-	}
-
-	if v, ok := raw["@timestamp"]; ok {
-		t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
-		if err != nil {
-			return nil, err
-		}
-		entry.Timestamp = t
-		delete(raw, "@timestamp")
-	}
-
-	// Parse dynamic KV args from the hclog payload.
-	for k, v := range raw {
-		entry.KVPairs = append(entry.KVPairs, &logEntryKV{
-			Key:   k,
-			Value: v,
-		})
-	}
-
-	return entry, nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
deleted file mode 100644
index 01c45ad..0000000
--- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package plugin
-
-import (
-	"encoding/binary"
-	"fmt"
-	"log"
-	"net"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/hashicorp/yamux"
-)
-
-// MuxBroker is responsible for brokering multiplexed connections by unique ID.
-// -// It is used by plugins to multiplex multiple RPC connections and data -// streams on top of a single connection between the plugin process and the -// host process. -// -// This allows a plugin to request a channel with a specific ID to connect to -// or accept a connection from, and the broker handles the details of -// holding these channels open while they're being negotiated. -// -// The Plugin interface has access to these for both Server and Client. -// The broker can be used by either (optionally) to reserve and connect to -// new multiplexed streams. This is useful for complex args and return values, -// or anything else you might need a data stream for. -type MuxBroker struct { - nextId uint32 - session *yamux.Session - streams map[uint32]*muxBrokerPending - - sync.Mutex -} - -type muxBrokerPending struct { - ch chan net.Conn - doneCh chan struct{} -} - -func newMuxBroker(s *yamux.Session) *MuxBroker { - return &MuxBroker{ - session: s, - streams: make(map[uint32]*muxBrokerPending), - } -} - -// Accept accepts a connection by ID. -// -// This should not be called multiple times with the same ID at one time. -func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { - var c net.Conn - p := m.getStream(id) - select { - case c = <-p.ch: - close(p.doneCh) - case <-time.After(5 * time.Second): - m.Lock() - defer m.Unlock() - delete(m.streams, id) - - return nil, fmt.Errorf("timeout waiting for accept") - } - - // Ack our connection - if err := binary.Write(c, binary.LittleEndian, id); err != nil { - c.Close() - return nil, err - } - - return c, nil -} - -// AcceptAndServe is used to accept a specific stream ID and immediately -// serve an RPC server on that stream ID. This is used to easily serve -// complex arguments. -// -// The served interface is always registered to the "Plugin" name. -func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { - conn, err := m.Accept(id) - if err != nil { - log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) - return - } - - serve(conn, "Plugin", v) -} - -// Close closes the connection and all sub-connections. -func (m *MuxBroker) Close() error { - return m.session.Close() -} - -// Dial opens a connection by ID. -func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { - // Open the stream - stream, err := m.session.OpenStream() - if err != nil { - return nil, err - } - - // Write the stream ID onto the wire. - if err := binary.Write(stream, binary.LittleEndian, id); err != nil { - stream.Close() - return nil, err - } - - // Read the ack that we connected. Then we're off! - var ack uint32 - if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { - stream.Close() - return nil, err - } - if ack != id { - stream.Close() - return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) - } - - return stream, nil -} - -// NextId returns a unique ID to use next. -// -// It is possible for very long-running plugin hosts to wrap this value, -// though it would require a very large amount of RPC calls. In practice -// we've never seen it happen. -func (m *MuxBroker) NextId() uint32 { - return atomic.AddUint32(&m.nextId, 1) -} - -// Run starts the brokering and should be executed in a goroutine, since it -// blocks forever, or until the session closes. -// -// Uses of MuxBroker never need to call this. It is called internally by -// the plugin host/client. 
-func (m *MuxBroker) Run() {
-	for {
-		stream, err := m.session.AcceptStream()
-		if err != nil {
-			// Once we receive an error, just exit
-			break
-		}
-
-		// Read the stream ID from the stream
-		var id uint32
-		if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
-			stream.Close()
-			continue
-		}
-
-		// Initialize the waiter
-		p := m.getStream(id)
-		select {
-		case p.ch <- stream:
-		default:
-		}
-
-		// Wait for a timeout
-		go m.timeoutWait(id, p)
-	}
-}
-
-func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
-	m.Lock()
-	defer m.Unlock()
-
-	p, ok := m.streams[id]
-	if ok {
-		return p
-	}
-
-	m.streams[id] = &muxBrokerPending{
-		ch:     make(chan net.Conn, 1),
-		doneCh: make(chan struct{}),
-	}
-	return m.streams[id]
-}
-
-func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
-	// Wait for the stream to either be picked up and connected, or
-	// for a timeout.
-	timeout := false
-	select {
-	case <-p.doneCh:
-	case <-time.After(5 * time.Second):
-		timeout = true
-	}
-
-	m.Lock()
-	defer m.Unlock()
-
-	// Delete the stream so no one else can grab it
-	delete(m.streams, id)
-
-	// If we timed out, then check if we have a channel in the buffer,
-	// and if so, close it.
-	if timeout {
-		select {
-		case s := <-p.ch:
-			s.Close()
-		default:
-			// Nothing was buffered; don't block waiting for a stream that
-			// can no longer arrive, since the entry was deleted above.
-		}
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
deleted file mode 100644
index 6b7bdd1..0000000
--- a/vendor/github.com/hashicorp/go-plugin/plugin.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// The plugin package exposes functions and helpers for communicating to
-// plugins which are implemented as standalone binary applications.
-//
-// plugin.Client fully manages the lifecycle of executing the application,
-// connecting to it, and returning the RPC client for dispensing plugins.
-//
-// plugin.Serve fully manages listeners to expose an RPC server from a binary
-// that plugin.Client can connect to.
-package plugin
-
-import (
-	"errors"
-	"net/rpc"
-
-	"google.golang.org/grpc"
-)
-
-// Plugin is the interface that is implemented to serve/connect to an
-// interface implementation.
-type Plugin interface {
-	// Server should return the RPC server compatible struct to serve
-	// the methods that the Client calls over net/rpc.
-	Server(*MuxBroker) (interface{}, error)
-
-	// Client returns an interface implementation for the plugin you're
-	// serving that communicates to the server end of the plugin.
-	Client(*MuxBroker, *rpc.Client) (interface{}, error)
-}
-
-// GRPCPlugin is the interface that is implemented to serve/connect to
-// a plugin over gRPC.
-type GRPCPlugin interface {
-	// GRPCServer should register this plugin for serving with the
-	// given GRPCServer. Unlike Plugin.Server, this is only called once
-	// since gRPC plugins serve singletons.
-	GRPCServer(*grpc.Server) error
-
-	// GRPCClient should return the interface implementation for the plugin
-	// you're serving via gRPC.
-	GRPCClient(*grpc.ClientConn) (interface{}, error)
-}
-
-// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
-// Server and Client functions. This will effectively disable support for
-// net/rpc based plugins.
-//
-// This struct can be embedded in your struct.
-type NetRPCUnsupportedPlugin struct{} - -func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} - -func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go deleted file mode 100644 index 88c999a..0000000 --- a/vendor/github.com/hashicorp/go-plugin/process.go +++ /dev/null @@ -1,24 +0,0 @@ -package plugin - -import ( - "time" -) - -// pidAlive checks whether a pid is alive. -func pidAlive(pid int) bool { - return _pidAlive(pid) -} - -// pidWait blocks for a process to exit. -func pidWait(pid int) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for range ticker.C { - if !pidAlive(pid) { - break - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go deleted file mode 100644 index 70ba546..0000000 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package plugin - -import ( - "os" - "syscall" -) - -// _pidAlive tests whether a process is alive or not by sending it Signal 0, -// since Go otherwise has no way to test this. -func _pidAlive(pid int) bool { - proc, err := os.FindProcess(pid) - if err == nil { - err = proc.Signal(syscall.Signal(0)) - } - - return err == nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go deleted file mode 100644 index 9f7b018..0000000 --- a/vendor/github.com/hashicorp/go-plugin/process_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "syscall" -) - -const ( - // Weird name but matches the MSDN docs - exit_STILL_ACTIVE = 259 - - processDesiredAccess = syscall.STANDARD_RIGHTS_READ | - syscall.PROCESS_QUERY_INFORMATION | - syscall.SYNCHRONIZE -) - -// _pidAlive tests whether a process is alive or not -func _pidAlive(pid int) bool { - h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) - if err != nil { - return false - } - - var ec uint32 - if e := syscall.GetExitCodeProcess(h, &ec); e != nil { - return false - } - - return ec == exit_STILL_ACTIVE -} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go deleted file mode 100644 index 0cfc19e..0000000 --- a/vendor/github.com/hashicorp/go-plugin/protocol.go +++ /dev/null @@ -1,45 +0,0 @@ -package plugin - -import ( - "io" - "net" -) - -// Protocol is an enum representing the types of protocols. -type Protocol string - -const ( - ProtocolInvalid Protocol = "" - ProtocolNetRPC Protocol = "netrpc" - ProtocolGRPC Protocol = "grpc" -) - -// ServerProtocol is an interface that must be implemented for new plugin -// protocols to be servers. -type ServerProtocol interface { - // Init is called once to configure and initialize the protocol, but - // not start listening. This is the point at which all validation should - // be done and errors returned. - Init() error - - // Config is extra configuration to be outputted to stdout. This will - // be automatically base64 encoded to ensure it can be parsed properly. - // This can be an empty string if additional configuration is not needed. 
- Config() string - - // Serve is called to serve connections on the given listener. This should - // continue until the listener is closed. - Serve(net.Listener) -} - -// ClientProtocol is an interface that must be implemented for new plugin -// protocols to be clients. -type ClientProtocol interface { - io.Closer - - // Dispense dispenses a new instance of the plugin with the given name. - Dispense(string) (interface{}, error) - - // Ping checks that the client connection is still healthy. - Ping() error -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go deleted file mode 100644 index f30a4b1..0000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ /dev/null @@ -1,170 +0,0 @@ -package plugin - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/rpc" - - "github.com/hashicorp/yamux" -) - -// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. -type RPCClient struct { - broker *MuxBroker - control *rpc.Client - plugins map[string]Plugin - - // These are the streams used for the various stdout/err overrides - stdout, stderr net.Conn -} - -// newRPCClient creates a new RPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newRPCClient(c *Client) (*RPCClient, error) { - // Connect to the client - conn, err := net.Dial(c.address.Network(), c.address.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - if c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - // Create the actual RPC client - result, err := NewRPCClient(conn, c.config.Plugins) - if err != nil { - conn.Close() - return nil, err - } - - // Begin the stream syncing so that stdin, out, err work properly - err = result.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) - if err != nil { - result.Close() - return nil, err - } - - return result, nil -} - -// NewRPCClient creates a client from an already-open connection-like value. -// Dial is typically used instead. -func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { - // Create the yamux client so we can multiplex - mux, err := yamux.Client(conn, nil) - if err != nil { - conn.Close() - return nil, err - } - - // Connect to the control stream. - control, err := mux.Open() - if err != nil { - mux.Close() - return nil, err - } - - // Connect stdout, stderr streams - stdstream := make([]net.Conn, 2) - for i, _ := range stdstream { - stdstream[i], err = mux.Open() - if err != nil { - mux.Close() - return nil, err - } - } - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Build the client using our broker and control channel. - return &RPCClient{ - broker: broker, - control: rpc.NewClient(control), - plugins: plugins, - stdout: stdstream[0], - stderr: stdstream[1], - }, nil -} - -// SyncStreams should be called to enable syncing of stdout, -// stderr with the plugin. -// -// This will return immediately and the syncing will continue to happen -// in the background. You do not need to launch this in a goroutine itself. -// -// This should never be called multiple times. 
-func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { - go copyStream("stdout", stdout, c.stdout) - go copyStream("stderr", stderr, c.stderr) - return nil -} - -// Close closes the connection. The client is no longer usable after this -// is called. -func (c *RPCClient) Close() error { - // Call the control channel and ask it to gracefully exit. If this - // errors, then we save it so that we always return an error but we - // want to try to close the other channels anyways. - var empty struct{} - returnErr := c.control.Call("Control.Quit", true, &empty) - - // Close the other streams we have - if err := c.control.Close(); err != nil { - return err - } - if err := c.stdout.Close(); err != nil { - return err - } - if err := c.stderr.Close(); err != nil { - return err - } - if err := c.broker.Close(); err != nil { - return err - } - - // Return back the error we got from Control.Quit. This is very important - // since we MUST return non-nil error if this fails so that Client.Kill - // will properly try a process.Kill. - return returnErr -} - -func (c *RPCClient) Dispense(name string) (interface{}, error) { - p, ok := c.plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - var id uint32 - if err := c.control.Call( - "Dispenser.Dispense", name, &id); err != nil { - return nil, err - } - - conn, err := c.broker.Dial(id) - if err != nil { - return nil, err - } - - return p.Client(c.broker, rpc.NewClient(conn)) -} - -// Ping pings the connection to ensure it is still alive. -// -// The error from the RPC call is returned exactly if you want to inspect -// it for further error analysis. Any error returned from here would indicate -// that the connection to the plugin is not healthy. -func (c *RPCClient) Ping() error { - var empty struct{} - return c.control.Call("Control.Ping", true, &empty) -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go deleted file mode 100644 index 5bb18dd..0000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ /dev/null @@ -1,197 +0,0 @@ -package plugin - -import ( - "errors" - "fmt" - "io" - "log" - "net" - "net/rpc" - "sync" - - "github.com/hashicorp/yamux" -) - -// RPCServer listens for network connections and then dispenses interface -// implementations over net/rpc. -// -// After setting the fields below, they shouldn't be read again directly -// from the structure which may be reading/writing them concurrently. -type RPCServer struct { - Plugins map[string]Plugin - - // Stdout, Stderr are what this server will use instead of the - // normal stdin/out/err. This is because due to the multi-process nature - // of our plugin system, we can't use the normal process values so we - // make our own custom one we pipe across. - Stdout io.Reader - Stderr io.Reader - - // DoneCh should be set to a non-nil channel that will be closed - // when the control requests the RPC server to end. - DoneCh chan<- struct{} - - lock sync.Mutex -} - -// ServerProtocol impl. -func (s *RPCServer) Init() error { return nil } - -// ServerProtocol impl. -func (s *RPCServer) Config() string { return "" } - -// ServerProtocol impl. -func (s *RPCServer) Serve(lis net.Listener) { - for { - conn, err := lis.Accept() - if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) - return - } - - go s.ServeConn(conn) - } -} - -// ServeConn runs a single connection. -// -// ServeConn blocks, serving the connection until the client hangs up. 
-func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
-	// First create the yamux server to wrap this connection
-	mux, err := yamux.Server(conn, nil)
-	if err != nil {
-		conn.Close()
-		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
-		return
-	}
-
-	// Accept the control connection
-	control, err := mux.Accept()
-	if err != nil {
-		mux.Close()
-		if err != io.EOF {
-			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
-		}
-
-		return
-	}
-
-	// Connect the stdstreams (in, out, err)
-	stdstream := make([]net.Conn, 2)
-	for i := range stdstream {
-		stdstream[i], err = mux.Accept()
-		if err != nil {
-			mux.Close()
-			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
-			return
-		}
-	}
-
-	// Copy std streams out to the proper place
-	go copyStream("stdout", stdstream[0], s.Stdout)
-	go copyStream("stderr", stdstream[1], s.Stderr)
-
-	// Create the broker and start it up
-	broker := newMuxBroker(mux)
-	go broker.Run()
-
-	// Use the control connection to build the dispenser and serve the
-	// connection.
-	server := rpc.NewServer()
-	server.RegisterName("Control", &controlServer{
-		server: s,
-	})
-	server.RegisterName("Dispenser", &dispenseServer{
-		broker:  broker,
-		plugins: s.Plugins,
-	})
-	server.ServeConn(control)
-}
-
-// done is called internally by the control server to trigger the
-// doneCh to close which is listened to by the main process to cleanly
-// exit.
-func (s *RPCServer) done() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	if s.DoneCh != nil {
-		close(s.DoneCh)
-		s.DoneCh = nil
-	}
-}
-
-// controlServer serves the control RPC endpoints (Ping and Quit) for the
-// RPCServer.
-type controlServer struct {
-	server *RPCServer
-}
-
-// Ping can be called to verify the connection (and likely the binary)
-// is still alive to a plugin.
-func (c *controlServer) Ping(
-	null bool, response *struct{}) error {
-	*response = struct{}{}
-	return nil
-}
-
-func (c *controlServer) Quit(
-	null bool, response *struct{}) error {
-	// End the server
-	c.server.done()
-
-	// Always return true
-	*response = struct{}{}
-
-	return nil
-}
-
-// dispenseServer dispenses various interface implementations for Terraform.
-type dispenseServer struct {
-	broker  *MuxBroker
-	plugins map[string]Plugin
-}
-
-func (d *dispenseServer) Dispense(
-	name string, response *uint32) error {
-	// Find the function to create this implementation
-	p, ok := d.plugins[name]
-	if !ok {
-		return fmt.Errorf("unknown plugin type: %s", name)
-	}
-
-	// Create the implementation first so we know if there is an error.
-	impl, err := p.Server(d.broker)
-	if err != nil {
-		// We turn the error into an errors error so that it works across RPC
-		return errors.New(err.Error())
-	}
-
-	// Reserve an ID for our implementation
-	id := d.broker.NextId()
-	*response = id
-
-	// Run the rest in a goroutine since it can only happen once this RPC
-	// call returns. We wait for a connection for the plugin implementation
-	// and serve it.
- go func() { - conn, err := d.broker.Accept(id) - if err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) - return - } - - serve(conn, "Plugin", impl) - }() - - return nil -} - -func serve(conn io.ReadWriteCloser, name string, v interface{}) { - server := rpc.NewServer() - if err := server.RegisterName(name, v); err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) - return - } - - server.ServeConn(conn) -} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go deleted file mode 100644 index e154321..0000000 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ /dev/null @@ -1,310 +0,0 @@ -package plugin - -import ( - "crypto/tls" - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "os/signal" - "runtime" - "strconv" - "sync/atomic" - - "github.com/hashicorp/go-hclog" - - "google.golang.org/grpc" -) - -// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. -// We will increment this whenever we change any protocol behavior. This -// will invalidate any prior plugins but will at least allow us to iterate -// on the core in a safe way. We will do our best to do this very -// infrequently. -const CoreProtocolVersion = 1 - -// HandshakeConfig is the configuration used by client and servers to -// handshake before starting a plugin connection. This is embedded by -// both ServeConfig and ClientConfig. -// -// In practice, the plugin host creates a HandshakeConfig that is exported -// and plugins then can easily consume it. -type HandshakeConfig struct { - // ProtocolVersion is the version that clients must match on to - // agree they can communicate. This should match the ProtocolVersion - // set on ClientConfig when using a plugin. - ProtocolVersion uint - - // MagicCookieKey and value are used as a very basic verification - // that a plugin is intended to be launched. This is not a security - // measure, just a UX feature. If the magic cookie doesn't match, - // we show human-friendly output. - MagicCookieKey string - MagicCookieValue string -} - -// ServeConfig configures what sorts of plugins are served. -type ServeConfig struct { - // HandshakeConfig is the configuration that must match clients. - HandshakeConfig - - // TLSProvider is a function that returns a configured tls.Config. - TLSProvider func() (*tls.Config, error) - - // Plugins are the plugins that are served. - Plugins map[string]Plugin - - // GRPCServer should be non-nil to enable serving the plugins over - // gRPC. This is a function to create the server when needed with the - // given server options. The server options populated by go-plugin will - // be for TLS if set. You may modify the input slice. - // - // Note that the grpc.Server will automatically be registered with - // the gRPC health checking service. This is not optional since go-plugin - // relies on this to implement Ping(). - GRPCServer func([]grpc.ServerOption) *grpc.Server -} - -// Protocol returns the protocol that this server should speak. -func (c *ServeConfig) Protocol() Protocol { - result := ProtocolNetRPC - if c.GRPCServer != nil { - result = ProtocolGRPC - } - - return result -} - -// Serve serves the plugins given by ServeConfig. -// -// Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to os.Stderr. -// -// This is the method that plugins should call in their main() functions. 
-func Serve(opts *ServeConfig) { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. Please notify the plugin author and report\n"+ - "this as a bug.\n") - os.Exit(1) - } - - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - os.Exit(1) - } - - // Logging goes to the original stderr - log.SetOutput(os.Stderr) - - // internal logger to os.Stderr - logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }) - - // Create our new stdout, stderr files. These will override our built-in - // stdout/stderr so that it works across the stream boundary. - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - - // Register a listener so we can accept a connection - listener, err := serverListener() - if err != nil { - logger.Error("plugin init error", "error", err) - return - } - - // Close the listener on return. We wrap this in a func() on purpose - // because the "listener" reference may change to TLS. - defer func() { - listener.Close() - }() - - var tlsConfig *tls.Config - if opts.TLSProvider != nil { - tlsConfig, err = opts.TLSProvider() - if err != nil { - logger.Error("plugin tls init", "error", err) - return - } - } - - // Create the channel to tell us when we're done - doneCh := make(chan struct{}) - - // Build the server type - var server ServerProtocol - switch opts.Protocol() { - case ProtocolNetRPC: - // If we have a TLS configuration then we wrap the listener - // ourselves and do it at that level. - if tlsConfig != nil { - listener = tls.NewListener(listener, tlsConfig) - } - - // Create the RPC server to dispense - server = &RPCServer{ - Plugins: opts.Plugins, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - } - - case ProtocolGRPC: - // Create the gRPC server - server = &GRPCServer{ - Plugins: opts.Plugins, - Server: opts.GRPCServer, - TLS: tlsConfig, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - } - - default: - panic("unknown server protocol: " + opts.Protocol()) - } - - // Initialize the servers - if err := server.Init(); err != nil { - logger.Error("protocol init", "error", err) - return - } - - // Build the extra configuration - extra := "" - if v := server.Config(); v != "" { - extra = base64.StdEncoding.EncodeToString([]byte(v)) - } - if extra != "" { - extra = "|" + extra - } - - logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - - // Output the address and service name to stdout so that core can bring it up. 
- fmt.Printf("%d|%d|%s|%s|%s%s\n", - CoreProtocolVersion, - opts.ProtocolVersion, - listener.Addr().Network(), - listener.Addr().String(), - opts.Protocol(), - extra) - os.Stdout.Sync() - - // Eat the interrupts - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - var count int32 = 0 - for { - <-ch - newCount := atomic.AddInt32(&count, 1) - logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) - } - }() - - // Set our new out, err - os.Stdout = stdout_w - os.Stderr = stderr_w - - // Accept connections and wait for completion - go server.Serve(listener) - <-doneCh -} - -func serverListener() (net.Listener, error) { - if runtime.GOOS == "windows" { - return serverListener_tcp() - } - - return serverListener_unix() -} - -func serverListener_tcp() (net.Listener, error) { - minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) - if err != nil { - return nil, err - } - - maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) - if err != nil { - return nil, err - } - - for port := minPort; port <= maxPort; port++ { - address := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", address) - if err == nil { - return listener, nil - } - } - - return nil, errors.New("Couldn't bind plugin TCP listener") -} - -func serverListener_unix() (net.Listener, error) { - tf, err := ioutil.TempFile("", "plugin") - if err != nil { - return nil, err - } - path := tf.Name() - - // Close the file and remove it because it has to not exist for - // the domain socket. - if err := tf.Close(); err != nil { - return nil, err - } - if err := os.Remove(path); err != nil { - return nil, err - } - - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - - // Wrap the listener in rmListener so that the Unix domain socket file - // is removed on close. - return &rmListener{ - Listener: l, - Path: path, - }, nil -} - -// rmListener is an implementation of net.Listener that forwards most -// calls to the listener but also removes a file as part of the close. We -// use this to cleanup the unix domain socket on close. -type rmListener struct { - net.Listener - Path string -} - -func (l *rmListener) Close() error { - // Close the listener itself - if err := l.Listener.Close(); err != nil { - return err - } - - // Remove the file - return os.Remove(l.Path) -} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go deleted file mode 100644 index 033079e..0000000 --- a/vendor/github.com/hashicorp/go-plugin/server_mux.go +++ /dev/null @@ -1,31 +0,0 @@ -package plugin - -import ( - "fmt" - "os" -) - -// ServeMuxMap is the type that is used to configure ServeMux -type ServeMuxMap map[string]*ServeConfig - -// ServeMux is like Serve, but serves multiple types of plugins determined -// by the argument given on the command-line. -// -// This command doesn't return until the plugin is done being executed. Any -// errors are logged or output to stderr. -func ServeMux(m ServeMuxMap) { - if len(os.Args) != 2 { - fmt.Fprintf(os.Stderr, - "Invoked improperly. 
This is an internal command that shouldn't\n"+ - "be manually invoked.\n") - os.Exit(1) - } - - opts, ok := m[os.Args[1]] - if !ok { - fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) - os.Exit(1) - } - - Serve(opts) -} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go deleted file mode 100644 index 1d547aa..0000000 --- a/vendor/github.com/hashicorp/go-plugin/stream.go +++ /dev/null @@ -1,18 +0,0 @@ -package plugin - -import ( - "io" - "log" -) - -func copyStream(name string, dst io.Writer, src io.Reader) { - if src == nil { - panic(name + ": src is nil") - } - if dst == nil { - panic(name + ": dst is nil") - } - if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go deleted file mode 100644 index c6bf7c4..0000000 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ /dev/null @@ -1,120 +0,0 @@ -package plugin - -import ( - "bytes" - "net" - "net/rpc" - - "github.com/mitchellh/go-testing-interface" - "google.golang.org/grpc" -) - -// The testing file contains test helpers that you can use outside of -// this package for making it easier to test plugins themselves. - -// TestConn is a helper function for returning a client and server -// net.Conn connected to each other. -func TestConn(t testing.T) (net.Conn, net.Conn) { - // Listen to any local port. This listener will be closed - // after a single connection is established. - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Start a goroutine to accept our client connection - var serverConn net.Conn - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - defer l.Close() - var err error - serverConn, err = l.Accept() - if err != nil { - t.Fatalf("err: %s", err) - } - }() - - // Connect to the server - clientConn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Wait for the server side to acknowledge it has connected - <-doneCh - - return clientConn, serverConn -} - -// TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { - clientConn, serverConn := TestConn(t) - - server := rpc.NewServer() - go server.ServeConn(serverConn) - - client := rpc.NewClient(clientConn) - return client, server -} - -// TestPluginRPCConn returns a plugin RPC client and server that are connected -// together and configured. -func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { - // Create two net.Conns we can use to shuttle our control connection - clientConn, serverConn := TestConn(t) - - // Start up the server - server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} - go server.ServeConn(serverConn) - - // Connect the client to the server - client, err := NewRPCClient(clientConn, ps) - if err != nil { - t.Fatalf("err: %s", err) - } - - return client, server -} - -// TestPluginGRPCConn returns a plugin gRPC client and server that are connected -// together and configured. This is used to test gRPC connections. 
-func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { - // Create a listener - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Start up the server - server := &GRPCServer{ - Plugins: ps, - Server: DefaultGRPCServer, - Stdout: new(bytes.Buffer), - Stderr: new(bytes.Buffer), - } - if err := server.Init(); err != nil { - t.Fatalf("err: %s", err) - } - go server.Serve(l) - - // Connect to the server - conn, err := grpc.Dial( - l.Addr().String(), - grpc.WithBlock(), - grpc.WithInsecure()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Connection successful, close the listener - l.Close() - - // Create the client - client := &GRPCClient{ - Conn: conn, - Plugins: ps, - } - - return client, server -} diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE deleted file mode 100644 index e87a115..0000000 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
"Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. 
You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. 
The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md deleted file mode 100644 index 02565c8..0000000 --- a/vendor/github.com/hashicorp/go-uuid/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) - -Generates UUID-format strings using high quality, purely random bytes. It can also parse UUID-format strings into their component bytes. - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). 
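As a reference for reviewers, here is a minimal sketch of how this removed package is typically consumed. It is an editorial illustration, not part of the patch, and assumes only the GenerateUUID and ParseUUID signatures visible in the deleted uuid.go below:

    package main

    import (
        "fmt"
        "log"

        uuid "github.com/hashicorp/go-uuid"
    )

    func main() {
        // GenerateUUID reads 16 bytes from crypto/rand and formats them as a
        // 36-character hyphenated string (see FormatUUID in uuid.go below).
        id, err := uuid.GenerateUUID()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(id)

        // ParseUUID reverses the formatting and returns the raw 16 bytes.
        raw, err := uuid.ParseUUID(id)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d raw bytes\n", len(raw)) // prints: 16 raw bytes
    }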
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go deleted file mode 100644 index ff9364c..0000000 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ /dev/null @@ -1,65 +0,0 @@ -package uuid - -import ( - "crypto/rand" - "encoding/hex" - "fmt" -) - -// GenerateRandomBytes is used to generate random bytes of given size. -func GenerateRandomBytes(size int) ([]byte, error) { - buf := make([]byte, size) - if _, err := rand.Read(buf); err != nil { - return nil, fmt.Errorf("failed to read random bytes: %v", err) - } - return buf, nil -} - -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - buf, err := GenerateRandomBytes(16) - if err != nil { - return "", err - } - return FormatUUID(buf) -} - -func FormatUUID(buf []byte) (string, error) { - if len(buf) != 16 { - return "", fmt.Errorf("wrong length byte slice (%d)", len(buf)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]), nil -} - -func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 36 { - return nil, fmt.Errorf("uuid string is wrong length") - } - - hyph := []byte("-") - - if uuid[8] != hyph[0] || - uuid[13] != hyph[0] || - uuid[18] != hyph[0] || - uuid[23] != hyph[0] { - return nil, fmt.Errorf("uuid is improperly formatted") - } - - hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] - - ret, err := hex.DecodeString(hexStr) - if err != nil { - return nil, err - } - if len(ret) != 16 { - return nil, fmt.Errorf("decoded hex is the wrong length") - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. 
any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md deleted file mode 100644 index 6f3a15c..0000000 --- a/vendor/github.com/hashicorp/go-version/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) - -go-version is a library for parsing versions and version constraints, -and verifying versions against a set of constraints. go-version -can sort a collection of versions properly, handles prerelease/beta -versions, can increment versions, etc. - -Versions used with go-version must follow [SemVer](http://semver.org/). - -## Installation and Usage - -Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). - -Installation can be done with a normal `go get`: - -``` -$ go get github.com/hashicorp/go-version -``` - -#### Version Parsing and Comparison - -```go -v1, err := version.NewVersion("1.2") -v2, err := version.NewVersion("1.5+metadata") - -// Comparison example. There is also GreaterThan, Equal, and just -// a simple Compare that returns an int allowing easy >=, <=, etc. -if v1.LessThan(v2) { - fmt.Printf("%s is less than %s", v1, v2) -} -``` - -#### Version Constraints - -```go -v1, err := version.NewVersion("1.2") - -// Constraints example. -constraints, err := version.NewConstraint(">= 1.0, < 1.4") -if constraints.Check(v1) { - fmt.Printf("%s satisfies constraints %s", v1, constraints) -} -``` - -#### Version Sorting - -```go -versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} -versions := make([]*version.Version, len(versionsRaw)) -for i, raw := range versionsRaw { - v, _ := version.NewVersion(raw) - versions[i] = v -} - -// After this, the versions are properly sorted -sort.Sort(version.Collection(versions)) -``` - -## Issues and Contributing - -If you find an issue with this library, please report an issue. If you'd -like, we welcome any contributions. Fork this library and submit a pull -request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go deleted file mode 100644 index 8c73df0..0000000 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ /dev/null @@ -1,178 +0,0 @@ -package version - -import ( - "fmt" - "regexp" - "strings" -) - -// Constraint represents a single constraint for a version, such as -// ">= 1.0". -type Constraint struct { - f constraintFunc - check *Version - original string -} - -// Constraints is a slice of constraints. We make a custom type so that -// we can add methods to it. 
-type Constraints []*Constraint - -type constraintFunc func(v, c *Version) bool - -var constraintOperators map[string]constraintFunc - -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintFunc{ - "": constraintEqual, - "=": constraintEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "~>": constraintPessimistic, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - -// NewConstraint will parse one or more constraints from the given -// constraint string. The string must be a comma-separated list of -// constraints. -func NewConstraint(v string) (Constraints, error) { - vs := strings.Split(v, ",") - result := make([]*Constraint, len(vs)) - for i, single := range vs { - c, err := parseSingle(single) - if err != nil { - return nil, err - } - - result[i] = c - } - - return Constraints(result), nil -} - -// Check tests if a version satisfies all the constraints. -func (cs Constraints) Check(v *Version) bool { - for _, c := range cs { - if !c.Check(v) { - return false - } - } - - return true -} - -// Returns the string format of the constraints -func (cs Constraints) String() string { - csStr := make([]string, len(cs)) - for i, c := range cs { - csStr[i] = c.String() - } - - return strings.Join(csStr, ",") -} - -// Check tests if a constraint is validated by the given version. -func (c *Constraint) Check(v *Version) bool { - return c.f(v, c.check) -} - -func (c *Constraint) String() string { - return c.original -} - -func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) - } - - check, err := NewVersion(matches[2]) - if err != nil { - return nil, err - } - - return &Constraint{ - f: constraintOperators[matches[1]], - check: check, - original: v, - }, nil -} - -//------------------------------------------------------------------- -// Constraint functions -//------------------------------------------------------------------- - -func constraintEqual(v, c *Version) bool { - return v.Equal(c) -} - -func constraintNotEqual(v, c *Version) bool { - return !v.Equal(c) -} - -func constraintGreaterThan(v, c *Version) bool { - return v.Compare(c) == 1 -} - -func constraintLessThan(v, c *Version) bool { - return v.Compare(c) == -1 -} - -func constraintGreaterThanEqual(v, c *Version) bool { - return v.Compare(c) >= 0 -} - -func constraintLessThanEqual(v, c *Version) bool { - return v.Compare(c) <= 0 -} - -func constraintPessimistic(v, c *Version) bool { - // If the version being checked is naturally less than the constraint, then there - // is no way for the version to be valid against the constraint - if v.LessThan(c) { - return false - } - // We'll use this more than once, so grab the length now so it's a little cleaner - // to write the later checks - cs := len(c.segments) - - // If the version being checked has less specificity than the constraint, then there - // is no way for the version to be valid against the constraint - if cs > len(v.segments) { - return false - } - - // Check the segments in the constraint against those in the version. 
If the version - // being checked, at any point, does not have the same values in each index of the - // constraint's segments, then it cannot be valid against the constraint. - for i := 0; i < c.si-1; i++ { - if v.segments[i] != c.segments[i] { - return false - } - } - - // Check the last part of the segment in the constraint. If the version segment at - // this index is less than the constraint's segment at this index, then it cannot - // be valid against the constraint - if c.segments[cs-1] > v.segments[cs-1] { - return false - } - - // If nothing has rejected the version by now, it's valid - return true -} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go deleted file mode 100644 index dfe509c..0000000 --- a/vendor/github.com/hashicorp/go-version/version.go +++ /dev/null @@ -1,322 +0,0 @@ -package version - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// The compiled regular expression used to test the validity of a version. -var versionRegexp *regexp.Regexp - -// The raw regular expression string used for testing the validity -// of a version. -const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `?` - -// Version represents a single version. -type Version struct { - metadata string - pre string - segments []int64 - si int -} - -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") -} - -// NewVersion parses the given version and returns a new -// Version. -func NewVersion(v string) (*Version, error) { - matches := versionRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) - } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int64, len(segmentsStr)) - si := 0 - for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version: %s", err) - } - - segments[i] = int64(val) - si++ - } - - // Even though we could support more than three segments, if we - // got fewer than three, pad it with 0s. This is to cover the basic - // default use case of semver, which is MAJOR.MINOR.PATCH at the minimum - for i := len(segments); i < 3; i++ { - segments = append(segments, 0) - } - - return &Version{ - metadata: matches[7], - pre: matches[4], - segments: segments, - si: si, - }, nil -} - -// Must is a helper that wraps a call to a function returning (*Version, error) -// and panics if error is non-nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - - return v -} - -// Compare compares this version to another version. This -// returns -1, 0, or 1 if this version is smaller, equal, -// or larger than the other version, respectively. -// -// If you want boolean results, use the LessThan, Equal, -// or GreaterThan methods. 
-func (v *Version) Compare(other *Version) int { - // A quick, efficient equality check - if v.String() == other.String() { - return 0 - } - - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { - preSelf := v.Prerelease() - preOther := other.Prerelease() - if preSelf == "" && preOther == "" { - return 0 - } - if preSelf == "" { - return 1 - } - if preOther == "" { - return -1 - } - - return comparePrereleases(preSelf, preOther) - } - - // Get the highest specificity (hS), or if they're equal, just use segmentSelf length - lenSelf := len(segmentsSelf) - lenOther := len(segmentsOther) - hS := lenSelf - if lenSelf < lenOther { - hS = lenOther - } - // Compare the segments - // Because a constraint could have more/less specificity than the version it's - // checking, we need to account for a lopsided or jagged comparison - for i := 0; i < hS; i++ { - if i > lenSelf-1 { - // This means Self had the lower specificity - // Check to see if the remaining segments in Other are all zeros - if !allZero(segmentsOther[i:]) { - // if not, it means that Other has to be greater than Self - return -1 - } - break - } else if i > lenOther-1 { - // this means Other had the lower specificity - // Check to see if the remaining segments in Self are all zeros - - if !allZero(segmentsSelf[i:]) { - // if not, it means that Self has to be greater than Other - return 1 - } - break - } - lhs := segmentsSelf[i] - rhs := segmentsOther[i] - if lhs == rhs { - continue - } else if lhs < rhs { - return -1 - } - // Otherwise, rhs was > lhs; they're not equal - return 1 - } - - // if we got this far, they're equal - return 0 -} - -func allZero(segs []int64) bool { - for _, s := range segs { - if s != 0 { - return false - } - } - return true -} - -func comparePart(preSelf string, preOther string) int { - if preSelf == preOther { - return 0 - } - - selfNumeric := true - _, err := strconv.ParseInt(preSelf, 10, 64) - if err != nil { - selfNumeric = false - } - - otherNumeric := true - _, err = strconv.ParseInt(preOther, 10, 64) - if err != nil { - otherNumeric = false - } - - // if a part is empty, we use the other to decide - if preSelf == "" { - if otherNumeric { - return -1 - } - return 1 - } - - if preOther == "" { - if selfNumeric { - return 1 - } - return -1 - } - - if selfNumeric && !otherNumeric { - return -1 - } else if !selfNumeric && otherNumeric { - return 1 - } else if preSelf > preOther { - return 1 - } - - return -1 -} - -func comparePrereleases(v string, other string) int { - // the same pre-release! - if v == other { - return 0 - } - - // split both pre-releases to analyze their parts - selfPreReleaseMeta := strings.Split(v, ".") - otherPreReleaseMeta := strings.Split(other, ".") - - selfPreReleaseLen := len(selfPreReleaseMeta) - otherPreReleaseLen := len(otherPreReleaseMeta) - - biggestLen := otherPreReleaseLen - if selfPreReleaseLen > otherPreReleaseLen { - biggestLen = selfPreReleaseLen - } - - // loop over the parts to find the first difference - for i := 0; i < biggestLen; i = i + 1 { - partSelfPre := "" - if i < selfPreReleaseLen { - partSelfPre = selfPreReleaseMeta[i] - } - - partOtherPre := "" - if i < otherPreReleaseLen { - partOtherPre = otherPreReleaseMeta[i] - } - - compare := comparePart(partSelfPre, partOtherPre) - // if parts are equal, continue the loop - if compare != 0 { - return compare - } - } - - return 0 -} - -// Equal tests if two versions are equal. 
-func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// GreaterThan tests if this version is greater than another version. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// LessThan tests if this version is less than another version. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// Metadata returns any metadata that was part of the version -// string. -// -// Metadata is anything that comes after the "+" in the version. -// For example, with "1.2.3+beta", the metadata is "beta". -func (v *Version) Metadata() string { - return v.metadata -} - -// Prerelease returns any prerelease data that is part of the version, -// or blank if there is no prerelease data. -// -// Prerelease information is anything that comes after the "-" in the -// version (but before any metadata). For example, with "1.2.3-beta", -// the prerelease information is "beta". -func (v *Version) Prerelease() string { - return v.pre -} - -// Segments returns the numeric segments of the version as a slice of ints. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments() []int { - segmentSlice := make([]int, len(v.segments)) - for i, v := range v.segments { - segmentSlice[i] = int(v) - } - return segmentSlice -} - -// Segments64 returns the numeric segments of the version as a slice of int64s. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments64() []int64 { - return v.segments -} - -// String returns the full version string, including pre-release -// and metadata information. -func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) - for i, s := range v.segments { - // These values were pre-parsed as integers, so formatting cannot fail - str := strconv.FormatInt(s, 10) - fmtParts[i] = str - } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go deleted file mode 100644 index cc888d4..0000000 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -// Collection is a type that implements sort.Interface -// so that versions can be sorted. -type Collection []*Version - -func (v Collection) Len() int { - return len(v) -} - -func (v Collection) Less(i, j int) bool { - return v[i].LessThan(v[j]) -} - -func (v Collection) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. 
“Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743..0000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c822332..0000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps make systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and other similar formats. - -## Why? - -A common question on first seeing HCL is: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. 
With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also force -people to learn some Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index 6e75ece..0000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,724 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the struct tag used to configure HCL decoding settings -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we were given a whole file, decode its root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. 
- if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. 
- testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what it's supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // value and populate it. 
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
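decodeSlice accepts a single object where a list is expected, which is what makes ambiguous JSON workable: a lone JSON object still decodes into a one-element slice of structs. A hedged sketch, with a hypothetical Config type and the public hcl.Decode API:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Rule []struct {
		Name string `hcl:",key"`
		Port int    `hcl:"port"`
	} `hcl:"rule"`
}

func main() {
	// A single JSON object, not an array: the slice/expandObject handling
	// above should still produce a one-element slice.
	const src = `{"rule": {"web": {"port": 80}}}`

	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(c.Rule), c.Rule[0].Name, c.Rule[0].Port) // expect: 1 web 80
}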
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. 
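One detail of decodeString above is worth pinning down: a bare NUMBER token may decode into a string field via its literal text. A minimal sketch, again assuming hcl.Decode:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type App struct {
	Version string `hcl:"version"`
}

func main() {
	var a App
	// decodeString converts the NUMBER token's text into the string field.
	if err := hcl.Decode(&a, `version = 2`); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", a.Version) // expect: "2"
}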
- if !field.CanSet() { - continue - } - - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - field.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, field); err != nil { - return err - } - } - - decodedFields = append(decodedFields, fieldType.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef65..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. 
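The tag handling in decodeStruct above ("key", "squash", "decodedFields") is easiest to see with a small example; the types here are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Base struct {
	Region string `hcl:"region"`
}

type Server struct {
	Base   `hcl:",squash"`                       // embedded fields decode as if declared inline
	Name   string   `hcl:",key"`                 // receives the block label
	Fields []string `hcl:",decodedFields"`       // filled with the names of fields actually set
	Port   int      `hcl:"port"`
}

type Config struct {
	Server []Server `hcl:"server"`
}

func main() {
	const src = `
server "web" {
  region = "eu-west-1"
  port   = 8080
}
`
	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		log.Fatal(err)
	}
	s := c.Server[0]
	fmt.Println(s.Name, s.Region, s.Port, s.Fields) // likely: web eu-west-1 8080 [Port Region]
}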
-type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. 
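Filter, Children and Elem above are the primitives the decoder is built on; a short sketch driving them through this package's parser:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	const src = `
service "a" { port = 80 }
service "b" { port = 81 }
timeout = 30
`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		log.Fatal(err)
	}

	list := f.Node.(*ast.ObjectList)

	// Filter strips the matched prefix, so the remaining keys are the labels.
	for _, item := range list.Filter("service").Items {
		fmt.Println(item.Keys[0].Token.Value()) // a, then b
	}

	// Elem keeps only direct assignments (key length == 0 after Filter).
	fmt.Println(len(list.Filter("timeout").Elem().Items)) // 1
}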
-type ObjectKey struct {
-	Token token.Token
-}
-
-func (o *ObjectKey) Pos() token.Pos {
-	return o.Token.Pos
-}
-
-// LiteralType represents a literal of basic type. Valid types are:
-// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
-type LiteralType struct {
-	Token token.Token
-
-	// comment types, only used when in a list
-	LeadComment *CommentGroup
-	LineComment *CommentGroup
-}
-
-func (l *LiteralType) Pos() token.Pos {
-	return l.Token.Pos
-}
-
-// ListType represents an HCL List type
-type ListType struct {
-	Lbrack token.Pos // position of "["
-	Rbrack token.Pos // position of "]"
-	List   []Node    // the elements in lexical order
-}
-
-func (l *ListType) Pos() token.Pos {
-	return l.Lbrack
-}
-
-func (l *ListType) Add(node Node) {
-	l.List = append(l.List, node)
-}
-
-// ObjectType represents an HCL Object Type
-type ObjectType struct {
-	Lbrace token.Pos   // position of "{"
-	Rbrace token.Pos   // position of "}"
-	List   *ObjectList // the nodes in lexical order
-}
-
-func (o *ObjectType) Pos() token.Pos {
-	return o.Lbrace
-}
-
-// Comment node represents a single //, # style or /*- style comment
-type Comment struct {
-	Start token.Pos // position of / or #
-	Text  string
-}
-
-func (c *Comment) Pos() token.Pos {
-	return c.Start
-}
-
-// CommentGroup node represents a sequence of comments with no other tokens and
-// no empty lines between.
-type CommentGroup struct {
-	List []*Comment // len(List) > 0
-}
-
-func (c *CommentGroup) Pos() token.Pos {
-	return c.List[0].Pos()
-}
-
-//-------------------------------------------------------------------
-// GoStringer
-//-------------------------------------------------------------------
-
-func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
-func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
deleted file mode 100644
index ba07ad4..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package ast
-
-import "fmt"
-
-// WalkFunc describes a function to be called for each node during a Walk. The
-// returned node can be used to rewrite the AST. Walking stops when the
-// returned bool is false.
-type WalkFunc func(Node) (Node, bool)
-
-// Walk traverses an AST in depth-first order: It starts by calling fn(node);
-// node must not be nil. If fn returns true, Walk invokes fn recursively for
-// each of the non-nil children of node, followed by a call of fn(nil). The
-// returned node of fn can be used to rewrite the passed node to fn.
-func Walk(node Node, fn WalkFunc) Node {
-	rewritten, ok := fn(node)
-	if !ok {
-		return rewritten
-	}
-
-	switch n := node.(type) {
-	case *File:
-		n.Node = Walk(n.Node, fn)
-	case *ObjectList:
-		for i, item := range n.Items {
-			n.Items[i] = Walk(item, fn).(*ObjectItem)
-		}
-	case *ObjectKey:
-		// nothing to do
-	case *ObjectItem:
-		for i, k := range n.Keys {
-			n.Keys[i] = Walk(k, fn).(*ObjectKey)
-		}
-
-		if n.Val != nil {
-			n.Val = Walk(n.Val, fn)
-		}
-	case *LiteralType:
-		// nothing to do
-	case *ListType:
-		for i, l := range n.List {
-			n.List[i] = Walk(l, fn)
-		}
-	case *ObjectType:
-		n.List = Walk(n.List, fn).(*ObjectList)
-	default:
-		// should we panic here?
- fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index b488180..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. -func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. 
when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. 
this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. 
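The key rules enforced above can be observed directly through parser.Parse: multiple keys may only introduce a nested object, never an assignment. A short sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Two keys followed by an object: allowed.
	if _, err := parser.Parse([]byte(`foo "bar" { baz = 1 }`)); err != nil {
		fmt.Println("unexpected:", err)
	}

	// Two keys followed by an assignment: rejected by objectKey above.
	if _, err := parser.Parse([]byte(`foo bar = 1`)); err != nil {
		fmt.Println("rejected:", err)
	}
}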
- if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. 
- comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 6966236..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,651 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
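The lead/line comment bookkeeping above surfaces in the AST as the LeadComment and LineComment fields of each ObjectItem; a small sketch of both associations:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	const src = `
# lead comment
port = 80 # line comment
`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		log.Fatal(err)
	}

	item := f.Node.(*ast.ObjectList).Items[0]
	fmt.Println(item.LeadComment.List[0].Text) // # lead comment
	fmt.Println(item.LineComment.List[0].Text) // # line comment
}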
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // If we see a null character with data left, then that is an error - if ch == '\x00' && s.buf.Len() > 0 { - s.err("unexpected null character (0x00)") - return eof - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = 
string(s.src[s.tokStart:s.tokEnd])
-	}
-	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
-
-	return token.Token{
-		Type: tok,
-		Pos:  s.tokPos,
-		Text: tokenText,
-	}
-}
-
-func (s *Scanner) scanComment(ch rune) {
-	// single line comments
-	if ch == '#' || (ch == '/' && s.peek() != '*') {
-		if ch == '/' && s.peek() != '/' {
-			s.err("expected '/' for comment")
-			return
-		}
-
-		ch = s.next()
-		for ch != '\n' && ch >= 0 && ch != eof {
-			ch = s.next()
-		}
-		if ch != eof && ch >= 0 {
-			s.unread()
-		}
-		return
-	}
-
-	// be sure we get the character after /* This allows us to find comments
-	// that are not terminated
-	if ch == '/' {
-		s.next()
-		ch = s.next() // read character after "/*"
-	}
-
-	// look for /* - style comments
-	for {
-		if ch < 0 || ch == eof {
-			s.err("comment not terminated")
-			break
-		}
-
-		ch0 := ch
-		ch = s.next()
-		if ch0 == '*' && ch == '/' {
-			break
-		}
-	}
-}
-
-// scanNumber scans an HCL number definition starting with the given rune
-func (s *Scanner) scanNumber(ch rune) token.Type {
-	if ch == '0' {
-		// check for hexadecimal, octal or float
-		ch = s.next()
-		if ch == 'x' || ch == 'X' {
-			// hexadecimal
-			ch = s.next()
-			found := false
-			for isHexadecimal(ch) {
-				ch = s.next()
-				found = true
-			}
-
-			if !found {
-				s.err("illegal hexadecimal number")
-			}
-
-			if ch != eof {
-				s.unread()
-			}
-
-			return token.NUMBER
-		}
-
-		// now it's either something like: 0421(octal) or 0.1231(float)
-		illegalOctal := false
-		for isDecimal(ch) {
-			ch = s.next()
-			if ch == '8' || ch == '9' {
-				// this is just a possibility. For example 0159 is illegal, but
-				// 0159.23 is valid. So we mark a possible illegal octal. If
-				// the next character is not a period, we'll print the error.
-				illegalOctal = true
-			}
-		}
-
-		if ch == 'e' || ch == 'E' {
-			ch = s.scanExponent(ch)
-			return token.FLOAT
-		}
-
-		if ch == '.' {
-			ch = s.scanFraction(ch)
-
-			if ch == 'e' || ch == 'E' {
-				ch = s.next()
-				ch = s.scanExponent(ch)
-			}
-			return token.FLOAT
-		}
-
-		if illegalOctal {
-			s.err("illegal octal number")
-		}
-
-		if ch != eof {
-			s.unread()
-		}
-		return token.NUMBER
-	}
-
-	s.scanMantissa(ch)
-	ch = s.next() // seek forward
-	if ch == 'e' || ch == 'E' {
-		ch = s.scanExponent(ch)
-		return token.FLOAT
-	}
-
-	if ch == '.' {
-		ch = s.scanFraction(ch)
-		if ch == 'e' || ch == 'E' {
-			ch = s.next()
-			ch = s.scanExponent(ch)
-		}
-		return token.FLOAT
-	}
-
-	if ch != eof {
-		s.unread()
-	}
-	return token.NUMBER
-}
-
-// scanMantissa scans the mantissa beginning from the rune. It returns the next
-// non decimal rune. It's used to determine whether it's a fraction or exponent.
-func (s *Scanner) scanMantissa(ch rune) rune {
-	scanned := false
-	for isDecimal(ch) {
-		ch = s.next()
-		scanned = true
-	}
-
-	if scanned && ch != eof {
-		s.unread()
-	}
-	return ch
-}
-
-// scanFraction scans the fraction after the '.' rune
-func (s *Scanner) scanFraction(ch rune) rune {
-	if ch == '.' {
-		ch = s.peek() // we peek just to see if we can move forward
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
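A sketch of the number-scanning paths above (hex stays NUMBER; fractions and exponents become FLOAT), driving the scanner directly:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	s := scanner.New([]byte(`a = 0x1F b = 0.5 c = 1e3`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		if tok.Type == token.NUMBER || tok.Type == token.FLOAT {
			fmt.Println(tok.Type, tok.Text) // NUMBER 0x1F, FLOAT 0.5, FLOAT 1e3
		}
	}
}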
-
-// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
-// rune.
-func (s *Scanner) scanExponent(ch rune) rune {
-	if ch == 'e' || ch == 'E' {
-		ch = s.next()
-		if ch == '-' || ch == '+' {
-			ch = s.next()
-		}
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
-
-// scanHeredoc scans a heredoc string
-func (s *Scanner) scanHeredoc() {
-	// Scan the second '<' in example: '<<EOF'
-	if s.next() != '<' {
-		s.err("heredoc expected second '<', didn't see it")
-		return
-	}
-
-	// Get the original offset so we can read just the heredoc ident
-	offs := s.srcPos.Offset
-
-	// Scan the identifier; a leading '-' marks the indented '<<-' form
-	ch := s.next()
-	if ch == '-' {
-		ch = s.next()
-	}
-	for isLetter(ch) || isDigit(ch) {
-		ch = s.next()
-	}
-
-	if ch == eof {
-		s.err("heredoc not terminated")
-		return
-	}
-
-	// The anchor must be terminated by a newline
-	if ch != '\n' {
-		s.err("invalid characters in heredoc anchor")
-		return
-	}
-
-	// Read the identifier (without the trailing newline)
-	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
-	if len(identBytes) == 0 {
-		s.err("zero-length heredoc anchor")
-		return
-	}
-
-	// For the indented form, allow leading whitespace before the anchor
-	var identRegexp *regexp.Regexp
-	if identBytes[0] == '-' {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
-	} else {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
-	}
-
-	// Read the body until a whole line matches the anchor
-	lineStart := s.srcPos.Offset
-	for {
-		ch := s.next()
-
-		if ch == '\n' {
-			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
-			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
-				break
-			}
-
-			// Not an anchor match, record the start of a new line
-			lineStart = s.srcPos.Offset
-		}
-
-		if ch == eof {
-			s.err("heredoc not terminated")
-			return
-		}
-	}
-
-	return
-}
-
-// scanString scans a quoted string
-func (s *Scanner) scanString() {
-	braces := 0
-	for {
-		// '"' opening already consumed
-		// read character after quote
-		ch := s.next()
-
-		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
-			s.err("literal not terminated")
-			return
-		}
-
-		if ch == '"' && braces == 0 {
-			break
-		}
-
-		// If we're going into a ${} then we can ignore quotes for awhile
-		if braces == 0 && ch == '$' && s.peek() == '{' {
-			braces++
-			s.next()
-		} else if braces > 0 && ch == '{' {
-			braces++
-		}
-		if braces > 0 && ch == '}' {
-			braces--
-		}
-
-		if ch == '\\' {
-			s.scanEscape()
-		}
-	}
-
-	return
-}
-
-// scanEscape scans an escape sequence
-func (s *Scanner) scanEscape() rune {
-	// http://en.cppreference.com/w/cpp/language/escape
-	ch := s.next() // read character after '\'
-	switch ch {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
-		// nothing to do
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		// octal notation
-		ch = s.scanDigits(ch, 8, 3)
-	case 'x':
-		// hexadecimal notation
-		ch = s.scanDigits(s.next(), 16, 2)
-	case 'u':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 4)
-	case 'U':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 8)
-	default:
-		s.err("illegal char escape")
-	}
-	return ch
-}
-
-// scanDigits scans a rune with the given base for n times. For example an
-// octal notation \184 would call scanDigits(ch, 8, 3)
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
-	start := n
-	for n > 0 && digitVal(ch) < base {
-		ch = s.next()
-		if ch == eof {
-			// If we see an EOF, we halt any more scanning of digits
-			// immediately.
-			break
-		}
-
-		n--
-	}
-	if n > 0 {
-		s.err("illegal char escape")
-	}
-
-	if n != start {
-		// we scanned all digits, put the last non digit char back,
-		// only if we read anything at all
-		s.unread()
-	}
-
-	return ch
-}
-
-// scanIdentifier scans an identifier and returns the literal string
-func (s *Scanner) scanIdentifier() string {
-	offs := s.srcPos.Offset - s.lastCharLen
-	ch := s.next()
-	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
-		ch = s.next()
-	}
-
-	if ch != eof {
-		s.unread() // we got identifier, put back latest char
-	}
-
-	return string(s.src[offs:s.srcPos.Offset])
-}
-
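Heredoc scanning above yields a single HEREDOC token whose text runs from the opening '<<' through the closing anchor line; for instance:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	src := "doc = <<EOF\nhello\nEOF\n"
	s := scanner.New([]byte(src))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		if tok.Type == token.HEREDOC {
			fmt.Printf("%q\n", tok.Text) // raw text, anchor lines included
		}
	}
}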
-// recentPosition returns the position of the character immediately after the
-// character or token returned by the last call to Scan.
-func (s *Scanner) recentPosition() (pos token.Pos) {
-	pos.Offset = s.srcPos.Offset - s.lastCharLen
-	switch {
-	case s.srcPos.Column > 0:
-		// common case: last character was not a '\n'
-		pos.Line = s.srcPos.Line
-		pos.Column = s.srcPos.Column
-	case s.lastLineLen > 0:
-		// last character was a '\n'
-		// (we cannot be at the beginning of the source
-		// since we have called next() at least once)
-		pos.Line = s.srcPos.Line - 1
-		pos.Column = s.lastLineLen
-	default:
-		// at the beginning of the source
-		pos.Line = 1
-		pos.Column = 1
-	}
-	return
-}
-
-// err prints the error of any scanning to s.Error function. If the function is
-// not defined, by default it prints them to os.Stderr
-func (s *Scanner) err(msg string) {
-	s.ErrorCount++
-	pos := s.recentPosition()
-
-	if s.Error != nil {
-		s.Error(pos, msg)
-		return
-	}
-
-	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
-}
-
-// isLetter returns true if the given rune is a letter
-func isLetter(ch rune) bool {
-	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-// isDigit returns true if the given rune is a decimal digit
-func isDigit(ch rune) bool {
-	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
-
-// isDecimal returns true if the given rune is a decimal number
-func isDecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9'
-}
-
-// isHexadecimal returns true if the given rune is a hexadecimal number
-func isHexadecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
-}
-
-// isWhitespace returns true if the rune is a space, tab, newline or carriage return
-func isWhitespace(ch rune) bool {
-	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
-}
-
-// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
-func digitVal(ch rune) int {
-	switch {
-	case '0' <= ch && ch <= '9':
-		return int(ch - '0')
-	case 'a' <= ch && ch <= 'f':
-		return int(ch - 'a' + 10)
-	case 'A' <= ch && ch <= 'F':
-		return int(ch - 'A' + 10)
-	}
-	return 16 // larger than any legal digit val
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
deleted file mode 100644
index 5f981ea..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package strconv
-
-import (
-	"errors"
-	"unicode/utf8"
-)
-
-// ErrSyntax indicates that a value does not have the right syntax for the target type.
-var ErrSyntax = errors.New("invalid syntax")
-
-// Unquote interprets s as a single-quoted, double-quoted,
-// or backquoted Go string literal, returning the string value
-// that s quotes. (If s is single-quoted, it would be a Go
-// character literal; Unquote returns the corresponding
-// one-character string.)
-func Unquote(s string) (t string, err error) {
-	n := len(s)
-	if n < 2 {
-		return "", ErrSyntax
-	}
-	quote := s[0]
-	if quote != s[n-1] {
-		return "", ErrSyntax
-	}
-	s = s[1 : n-1]
-
-	if quote != '"' {
-		return "", ErrSyntax
-	}
-	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
-		return "", ErrSyntax
-	}
-
-	// Is it trivial? Avoid allocation.
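This Unquote differs from the standard library's in one key way: the body of a ${ } interpolation passes through verbatim while ordinary escapes are still processed. A short sketch:

package main

import (
	"fmt"
	"log"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// The \t escape is decoded, but ${var.name} is copied through untouched.
	s, err := hclstrconv.Unquote(`"a\tb ${var.name} c"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", s) // "a\tb ${var.name} c"
}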
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb7..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c066..0000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f0..0000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
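This flattening is what lets JSON and native HCL decode identically; a sketch with a hypothetical Root type, assuming the public hcl.Decode entry point handles both syntaxes:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Port int `hcl:"port"`
}

type Root struct {
	Service map[string]Config `hcl:"service"`
}

func main() {
	for _, src := range []string{
		`{"service": {"web": {"port": 80}}}`, // JSON form, flattened as above
		`service "web" { port = 80 }`,        // native HCL form
	} {
		var r Root
		if err := hcl.Decode(&r, src); err != nil {
			log.Fatal(err)
		}
		fmt.Println(r.Service["web"].Port) // 80 both times
	}
}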
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
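Driving this JSON parser directly shows the object-root requirement and the hcl/ast-compatible ObjectList it emits; a sketch:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	jsonParser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	f, err := jsonParser.Parse([]byte(`{"name": "web", "port": 80}`))
	if err != nil {
		log.Fatal(err)
	}

	// The root object is exposed as an *ast.ObjectList for HCL compatibility.
	list := f.Node.(*ast.ObjectList)
	for _, item := range list.Items {
		lit := item.Val.(*ast.LiteralType)
		fmt.Println(item.Keys[0].Token.Value(), "=", lit.Token.Text) // raw literal text
	}

	// A non-object root is rejected by object() above.
	if _, err := jsonParser.Parse([]byte(`[1, 2]`)); err != nil {
		fmt.Println("rejected:", err)
	}
}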
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index dd5c72b..0000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb7..0000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
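The Pos type removed in this hunk documents four String forms (with file, without file, invalid with file, invalid without). A small sketch confirming three of them against the upstream package; the filename is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/json/token"
    )

    func main() {
    	withFile := token.Pos{Filename: "config.json", Line: 3, Column: 7}
    	noFile := token.Pos{Line: 3, Column: 7}
    	invalid := token.Pos{} // Line 0, so IsValid reports false

    	fmt.Println(withFile) // config.json:3:7
    	fmt.Println(noFile)   // 3:7
    	fmt.Println(invalid)  // -
    }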
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3e..0000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. -func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c2..0000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. 
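lexMode below feeds the format auto-detection used by the parse.go entry points that follow it in this patch: the first non-space rune decides which parser runs. A sketch of that behavior against the upstream hcl package:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/hcl"
    )

    func main() {
    	for _, in := range []string{
    		`{"name": "web"}`, // first rune '{' -> JSON parser
    		`name = "web"`,    // otherwise     -> native HCL parser
    	} {
    		file, err := hcl.Parse(in)
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("%-20q root %T\n", in, file.Node)
    	}
    }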
-func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c..0000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go b/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go deleted file mode 100644 index c689f73..0000000 --- a/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go +++ /dev/null @@ -1,15 +0,0 @@ -package testhelper - -import ( - "runtime" - "strings" -) - -// Converts the line endings when on Windows -func Unix2dos(unix string) string { - if runtime.GOOS != "windows" { - return unix - } - - return strings.Replace(unix, "\n", "\r\n", -1) -} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE deleted file mode 100644 index 82b4de9..0000000 --- a/vendor/github.com/hashicorp/hil/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. 
“Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. 
You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md deleted file mode 100644 index 186ed25..0000000 --- a/vendor/github.com/hashicorp/hil/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# HIL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil) - -HIL (HashiCorp Interpolation Language) is a lightweight embedded language used -primarily for configuration interpolation. The goal of HIL is to make a simple -language for interpolations in the various configurations of HashiCorp tools. - -HIL is built to interpolate any string, but is in use by HashiCorp primarily -with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any -way for use with HIL. - -HIL isn't meant to be a general purpose language. It was built for basic -configuration interpolations. Therefore, you can't currently write functions, -have conditionals, set intermediary variables, etc. within HIL itself. It is -possible some of these may be added later but the right use case must exist. - -## Why? - -Many of our tools have support for something similar to templates, but -within the configuration itself. The most prominent requirement was in -[Terraform](https://github.com/hashicorp/terraform) where we wanted the -configuration to be able to reference values from elsewhere in the -configuration. Example: - - foo = "hi ${var.world}" - -We originally used a full templating language for this, but found it -was too heavy weight. Additionally, many full languages required bindings -to C (and thus the usage of cgo) which we try to avoid to make cross-compilation -easier. We then moved to very basic regular expression based -string replacement, but found the need for basic arithmetic and function -calls resulting in overly complex regular expressions. - -Ultimately, we wrote our own mini-language within Terraform itself. As -we built other projects such as [Nomad](https://nomadproject.io) and -[Otto](https://ottoproject.io), the need for basic interpolations arose -again. - -Thus HIL was born. It is extracted from Terraform, cleaned up, and -better tested for general purpose use. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammer is listed here. - -Code begins within `${` and `}`. Outside of this, text is treated -literally. For example, `foo` is a valid HIL program that is just the -string "foo", but `foo ${bar}` is an HIL program that is the string "foo " -concatened with the value of `bar`. For the remainder of the syntax -docs, we'll assume you're within `${}`. - - * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example - identifiers: `foo`, `var.foo`, `foo-bar`. - - * Strings are double quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Numbers are assumed to be base 10. If you prefix a number with 0x, - it is treated as a hexadecimal. If it is prefixed with 0, it is - treated as an octal. Numbers can be in scientific notation: "1e10". - - * Unary `-` can be used for negative numbers. 
Example: `-10` or `-0.2` - - * Boolean values: `true`, `false` - - * The following arithmetic operations are allowed: +, -, *, /, %. - - * Function calls are in the form of `name(arg1, arg2, ...)`. Example: - `add(1, 5)`. Arguments can be any valid HIL expression, example: - `add(1, var.foo)` or even nested function calls: - `add(1, get("some value"))`. - - * Within strings, further interpolations can be opened with `${}`. - Example: `"Hello ${nested}"`. A full example including the - original `${}` (remember this list assumes were inside of one - already) could be: `foo ${func("hello ${var.foo}")}`. - -## Language Changes - -We've used this mini-language in Terraform for years. For backwards compatibility -reasons, we're unlikely to make an incompatible change to the language but -we're not currently making that promise, either. - -The internal API of this project may very well change as we evolve it -to work with more of our projects. We recommend using some sort of dependency -management solution with this package. - -## Future Changes - -The following changes are already planned to be made at some point: - - * Richer types: lists, maps, etc. - - * Convert to a more standard Go parser structure similar to HCL. This - will improve our error messaging as well as allow us to have automatic - formatting. - - * Allow interpolations to result in more types than just a string. While - within the interpolation basic types are honored, the result is always - a string. diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml deleted file mode 100644 index feaf7a3..0000000 --- a/vendor/github.com/hashicorp/hil/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "build-{branch}-{build}" -image: Visual Studio 2015 -clone_folder: c:\gopath\src\github.com\hashicorp\hil -environment: - GOPATH: c:\gopath -init: - - git config --global core.autocrlf true -install: -- cmd: >- - echo %Path% - - go version - - go env - - go get -d -v -t ./... -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go deleted file mode 100644 index 94dc24f..0000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic.go +++ /dev/null @@ -1,43 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Arithmetic represents a node where the result is arithmetic of -// two or more operands in the order given. -type Arithmetic struct { - Op ArithmeticOp - Exprs []Node - Posx Pos -} - -func (n *Arithmetic) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Arithmetic) Pos() Pos { - return n.Posx -} - -func (n *Arithmetic) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Arithmetic) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Arithmetic) Type(Scope) (Type, error) { - return TypeInt, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go deleted file mode 100644 index 18880c6..0000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go +++ /dev/null @@ -1,24 +0,0 @@ -package ast - -// ArithmeticOp is the operation to use for the math. 
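The README removed above shows HIL programs only as strings. For an end-to-end sketch, the library's top-level hil.Parse and hil.Eval entry points can evaluate one against a scope; those two functions and the EvalConfig shape do not appear in this hunk, so treat their exact signatures as an assumption here:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/hil"
    	"github.com/hashicorp/hil/ast"
    )

    func main() {
    	// Parse the README-style program into an AST (assumed API).
    	tree, err := hil.Parse(`hi ${var.world}`)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// BasicScope and Variable are defined later in this patch.
    	config := &hil.EvalConfig{
    		GlobalScope: &ast.BasicScope{
    			VarMap: map[string]ast.Variable{
    				"var.world": {Value: "world", Type: ast.TypeString},
    			},
    		},
    	}

    	result, err := hil.Eval(tree, config)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(result.Value) // hi world
    }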
-type ArithmeticOp int - -const ( - ArithmeticOpInvalid ArithmeticOp = 0 - - ArithmeticOpAdd ArithmeticOp = iota - ArithmeticOpSub - ArithmeticOpMul - ArithmeticOpDiv - ArithmeticOpMod - - ArithmeticOpLogicalAnd - ArithmeticOpLogicalOr - - ArithmeticOpEqual - ArithmeticOpNotEqual - ArithmeticOpLessThan - ArithmeticOpLessThanOrEqual - ArithmeticOpGreaterThan - ArithmeticOpGreaterThanOrEqual -) diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go deleted file mode 100644 index c6350f8..0000000 --- a/vendor/github.com/hashicorp/hil/ast/ast.go +++ /dev/null @@ -1,99 +0,0 @@ -package ast - -import ( - "fmt" -) - -// Node is the interface that all AST nodes must implement. -type Node interface { - // Accept is called to dispatch to the visitors. It must return the - // resulting Node (which might be different in an AST transform). - Accept(Visitor) Node - - // Pos returns the position of this node in some source. - Pos() Pos - - // Type returns the type of this node for the given context. - Type(Scope) (Type, error) -} - -// Pos is the starting position of an AST node -type Pos struct { - Column, Line int // Column/Line number, starting at 1 - Filename string // Optional source filename, if known -} - -func (p Pos) String() string { - if p.Filename == "" { - return fmt.Sprintf("%d:%d", p.Line, p.Column) - } else { - return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column) - } -} - -// InitPos is an initiaial position value. This should be used as -// the starting position (presets the column and line to 1). -var InitPos = Pos{Column: 1, Line: 1} - -// Visitors are just implementations of this function. -// -// The function must return the Node to replace this node with. "nil" is -// _not_ a valid return value. If there is no replacement, the original node -// should be returned. We build this replacement directly into the visitor -// pattern since AST transformations are a common and useful tool and -// building it into the AST itself makes it required for future Node -// implementations and very easy to do. -// -// Note that this isn't a true implementation of the visitor pattern, which -// generally requires proper type dispatch on the function. However, -// implementing this basic visitor pattern style is still very useful even -// if you have to type switch. -type Visitor func(Node) Node - -//go:generate stringer -type=Type - -// Type is the type of any value. -type Type uint32 - -const ( - TypeInvalid Type = 0 - TypeAny Type = 1 << iota - TypeBool - TypeString - TypeInt - TypeFloat - TypeList - TypeMap - - // This is a special type used by Terraform to mark "unknown" values. - // It is impossible for this type to be introduced into your HIL programs - // unless you explicitly set a variable to this value. In that case, - // any operation including the variable will return "TypeUnknown" as the - // type. 
- TypeUnknown -) - -func (t Type) Printable() string { - switch t { - case TypeInvalid: - return "invalid type" - case TypeAny: - return "any type" - case TypeBool: - return "type bool" - case TypeString: - return "type string" - case TypeInt: - return "type int" - case TypeFloat: - return "type float" - case TypeList: - return "type list" - case TypeMap: - return "type map" - case TypeUnknown: - return "type unknown" - default: - return "unknown type" - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go deleted file mode 100644 index 0557011..0000000 --- a/vendor/github.com/hashicorp/hil/ast/call.go +++ /dev/null @@ -1,47 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Call represents a function call. -type Call struct { - Func string - Args []Node - Posx Pos -} - -func (n *Call) Accept(v Visitor) Node { - for i, a := range n.Args { - n.Args[i] = a.Accept(v) - } - - return v(n) -} - -func (n *Call) Pos() Pos { - return n.Posx -} - -func (n *Call) String() string { - args := make([]string, len(n.Args)) - for i, arg := range n.Args { - args[i] = fmt.Sprintf("%s", arg) - } - - return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) -} - -func (n *Call) Type(s Scope) (Type, error) { - f, ok := s.LookupFunc(n.Func) - if !ok { - return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) - } - - return f.ReturnType, nil -} - -func (n *Call) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go deleted file mode 100644 index be48f89..0000000 --- a/vendor/github.com/hashicorp/hil/ast/conditional.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -type Conditional struct { - CondExpr Node - TrueExpr Node - FalseExpr Node - Posx Pos -} - -// Accept passes the given visitor to the child nodes in this order: -// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. -func (n *Conditional) Accept(v Visitor) Node { - n.CondExpr = n.CondExpr.Accept(v) - n.TrueExpr = n.TrueExpr.Accept(v) - n.FalseExpr = n.FalseExpr.Accept(v) - - return v(n) -} - -func (n *Conditional) Pos() Pos { - return n.Posx -} - -func (n *Conditional) Type(Scope) (Type, error) { - // This is not actually a useful value; the type checker ignores - // this function when analyzing conditionals, just as with Arithmetic. 
- return TypeInt, nil -} - -func (n *Conditional) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go deleted file mode 100644 index 860c25f..0000000 --- a/vendor/github.com/hashicorp/hil/ast/index.go +++ /dev/null @@ -1,76 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Index represents an indexing operation into another data structure -type Index struct { - Target Node - Key Node - Posx Pos -} - -func (n *Index) Accept(v Visitor) Node { - n.Target = n.Target.Accept(v) - n.Key = n.Key.Accept(v) - return v(n) -} - -func (n *Index) Pos() Pos { - return n.Posx -} - -func (n *Index) String() string { - return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key) -} - -func (n *Index) Type(s Scope) (Type, error) { - variableAccess, ok := n.Target.(*VariableAccess) - if !ok { - return TypeInvalid, fmt.Errorf("target is not a variable") - } - - variable, ok := s.LookupVar(variableAccess.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name) - } - - switch variable.Type { - case TypeList: - return n.typeList(variable, variableAccess.Name) - case TypeMap: - return n.typeMap(variable, variableAccess.Name) - default: - return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type) - } -} - -func (n *Index) typeList(variable Variable, variableName string) (Type, error) { - // We assume type checking has already determined that this is a list - list := variable.Value.([]Variable) - - return VariableListElementTypesAreHomogenous(variableName, list) -} - -func (n *Index) typeMap(variable Variable, variableName string) (Type, error) { - // We assume type checking has already determined that this is a map - vmap := variable.Value.(map[string]Variable) - - return VariableMapValueTypesAreHomogenous(variableName, vmap) -} - -func reportTypes(typesFound map[Type]struct{}) string { - stringTypes := make([]string, len(typesFound)) - i := 0 - for k, _ := range typesFound { - stringTypes[0] = k.String() - i++ - } - return strings.Join(stringTypes, ", ") -} - -func (n *Index) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go deleted file mode 100644 index da6014f..0000000 --- a/vendor/github.com/hashicorp/hil/ast/literal.go +++ /dev/null @@ -1,88 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// LiteralNode represents a single literal value, such as "foo" or -// 42 or 3.14159. Based on the Type, the Value can be safely cast. -type LiteralNode struct { - Value interface{} - Typex Type - Posx Pos -} - -// NewLiteralNode returns a new literal node representing the given -// literal Go value, which must correspond to one of the primitive types -// supported by HIL. Lists and maps cannot currently be constructed via -// this function. -// -// If an inappropriately-typed value is provided, this function will -// return an error. The main intended use of this function is to produce -// "synthetic" literals from constants in code, where the value type is -// well known at compile time. To easily store these in global variables, -// see also MustNewLiteralNode. 
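The Index node removed above type-checks an expression like azs[0] by looking the target up in a Scope and then checking element homogeneity via the variables_helper.go functions later in this patch. A sketch, entirely in terms of types that appear in this diff:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hil/ast"
    )

    func main() {
    	// A list variable whose elements are all strings, so the element
    	// type check succeeds.
    	scope := &ast.BasicScope{
    		VarMap: map[string]ast.Variable{
    			"azs": {
    				Type: ast.TypeList,
    				Value: []ast.Variable{
    					{Type: ast.TypeString, Value: "us-east-1a"},
    					{Type: ast.TypeString, Value: "us-east-1b"},
    				},
    			},
    		},
    	}

    	idx := &ast.Index{
    		Target: &ast.VariableAccess{Name: "azs"},
    		Key:    ast.MustNewLiteralNode(0, ast.InitPos),
    	}

    	t, err := idx.Type(scope)
    	fmt.Println(t, err) // TypeString <nil>
    }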
-func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { - goType := reflect.TypeOf(value) - var hilType Type - - switch goType.Kind() { - case reflect.Bool: - hilType = TypeBool - case reflect.Int: - hilType = TypeInt - case reflect.Float64: - hilType = TypeFloat - case reflect.String: - hilType = TypeString - default: - return nil, fmt.Errorf("unsupported literal node type: %T", value) - } - - return &LiteralNode{ - Value: value, - Typex: hilType, - Posx: pos, - }, nil -} - -// MustNewLiteralNode wraps NewLiteralNode and panics if an error is -// returned, thus allowing valid literal nodes to be easily assigned to -// global variables. -func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { - node, err := NewLiteralNode(value, pos) - if err != nil { - panic(err) - } - return node -} - -func (n *LiteralNode) Accept(v Visitor) Node { - return v(n) -} - -func (n *LiteralNode) Pos() Pos { - return n.Posx -} - -func (n *LiteralNode) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *LiteralNode) String() string { - return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) -} - -func (n *LiteralNode) Type(Scope) (Type, error) { - return n.Typex, nil -} - -// IsUnknown returns true either if the node's value is itself unknown -// of if it is a collection containing any unknown elements, deeply. -func (n *LiteralNode) IsUnknown() bool { - return IsUnknown(Variable{ - Type: n.Typex, - Value: n.Value, - }) -} diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go deleted file mode 100644 index 1e27f97..0000000 --- a/vendor/github.com/hashicorp/hil/ast/output.go +++ /dev/null @@ -1,78 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Output represents the root node of all interpolation evaluations. If the -// output only has one expression which is either a TypeList or TypeMap, the -// Output can be type-asserted to []interface{} or map[string]interface{} -// respectively. Otherwise the Output evaluates as a string, and concatenates -// the evaluation of each expression. 
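NewLiteralNode and MustNewLiteralNode above are the intended way to synthesize literal nodes in code. A short sketch of both, using only API shown in this diff:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hil/ast"
    )

    func main() {
    	// NewLiteralNode infers the HIL type from the Go kind: bool, int,
    	// float64, and string are accepted; anything else is an error.
    	n, err := ast.NewLiteralNode("us-east-1", ast.InitPos)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(n) // Literal(TypeString, us-east-1)

    	// MustNewLiteralNode panics on error, which is convenient for
    	// package-level variables holding synthetic literals.
    	answer := ast.MustNewLiteralNode(42, ast.InitPos)
    	fmt.Println(answer) // Literal(TypeInt, 42)
    }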
-type Output struct { - Exprs []Node - Posx Pos -} - -func (n *Output) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Output) Pos() Pos { - return n.Posx -} - -func (n *Output) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Output) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Output) Type(s Scope) (Type, error) { - // Special case no expressions for backward compatibility - if len(n.Exprs) == 0 { - return TypeString, nil - } - - // Special case a single expression of types list or map - if len(n.Exprs) == 1 { - exprType, err := n.Exprs[0].Type(s) - if err != nil { - return TypeInvalid, err - } - switch exprType { - case TypeList: - return TypeList, nil - case TypeMap: - return TypeMap, nil - } - } - - // Otherwise ensure all our expressions are strings - for index, expr := range n.Exprs { - exprType, err := expr.Type(s) - if err != nil { - return TypeInvalid, err - } - // We only look for things we know we can't coerce with an implicit conversion func - if exprType == TypeList || exprType == TypeMap { - return TypeInvalid, fmt.Errorf( - "multi-expression HIL outputs may only have string inputs: %d is type %s", - index, exprType) - } - } - - return TypeString, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go deleted file mode 100644 index 7a975d9..0000000 --- a/vendor/github.com/hashicorp/hil/ast/scope.go +++ /dev/null @@ -1,90 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// Scope is the interface used to look up variables and functions while -// evaluating. How these functions/variables are defined are up to the caller. -type Scope interface { - LookupFunc(string) (Function, bool) - LookupVar(string) (Variable, bool) -} - -// Variable is a variable value for execution given as input to the engine. -// It records the value of a variables along with their type. -type Variable struct { - Value interface{} - Type Type -} - -// NewVariable creates a new Variable for the given value. This will -// attempt to infer the correct type. If it can't, an error will be returned. -func NewVariable(v interface{}) (result Variable, err error) { - switch v := reflect.ValueOf(v); v.Kind() { - case reflect.String: - result.Type = TypeString - default: - err = fmt.Errorf("Unknown type: %s", v.Kind()) - } - - result.Value = v - return -} - -// String implements Stringer on Variable, displaying the type and value -// of the Variable. -func (v Variable) String() string { - return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value) -} - -// Function defines a function that can be executed by the engine. -// The type checker will validate that the proper types will be called -// to the callback. -type Function struct { - // ArgTypes is the list of types in argument order. These are the - // required arguments. - // - // ReturnType is the type of the returned value. The Callback MUST - // return this type. - ArgTypes []Type - ReturnType Type - - // Variadic, if true, says that this function is variadic, meaning - // it takes a variable number of arguments. In this case, the - // VariadicType must be set. - Variadic bool - VariadicType Type - - // Callback is the function called for a function. The argument - // types are guaranteed to match the spec above by the type checker. 
- // The length of the args is strictly == len(ArgTypes) unless Variadic - is true, in which case it's >= len(ArgTypes). - Callback func([]interface{}) (interface{}, error) -} - -// BasicScope is a simple scope that looks up variables and functions -// using a map. -type BasicScope struct { - FuncMap map[string]Function - VarMap map[string]Variable -} - -func (s *BasicScope) LookupFunc(n string) (Function, bool) { - if s == nil { - return Function{}, false - } - - v, ok := s.FuncMap[n] - return v, ok -} - -func (s *BasicScope) LookupVar(n string) (Variable, bool) { - if s == nil { - return Variable{}, false - } - - v, ok := s.VarMap[n] - return v, ok -} diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go deleted file mode 100644 index bd2bc15..0000000 --- a/vendor/github.com/hashicorp/hil/ast/stack.go +++ /dev/null @@ -1,25 +0,0 @@ -package ast - -// Stack is a stack of Node. -type Stack struct { - stack []Node -} - -func (s *Stack) Len() int { - return len(s.stack) -} - -func (s *Stack) Push(n Node) { - s.stack = append(s.stack, n) -} - -func (s *Stack) Pop() Node { - x := s.stack[len(s.stack)-1] - s.stack[len(s.stack)-1] = nil - s.stack = s.stack[:len(s.stack)-1] - return x -} - -func (s *Stack) Reset() { - s.stack = nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go deleted file mode 100644 index 1f51a98..0000000 --- a/vendor/github.com/hashicorp/hil/ast/type_string.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT - -package ast - -import "fmt" - -const ( - _Type_name_0 = "TypeInvalid" - _Type_name_1 = "TypeAny" - _Type_name_2 = "TypeBool" - _Type_name_3 = "TypeString" - _Type_name_4 = "TypeInt" - _Type_name_5 = "TypeFloat" - _Type_name_6 = "TypeList" - _Type_name_7 = "TypeMap" - _Type_name_8 = "TypeUnknown" -) - -var ( - _Type_index_0 = [...]uint8{0, 11} - _Type_index_1 = [...]uint8{0, 7} - _Type_index_2 = [...]uint8{0, 8} - _Type_index_3 = [...]uint8{0, 10} - _Type_index_4 = [...]uint8{0, 7} - _Type_index_5 = [...]uint8{0, 9} - _Type_index_6 = [...]uint8{0, 8} - _Type_index_7 = [...]uint8{0, 7} - _Type_index_8 = [...]uint8{0, 11} -) - -func (i Type) String() string { - switch { - case i == 0: - return _Type_name_0 - case i == 2: - return _Type_name_1 - case i == 4: - return _Type_name_2 - case i == 8: - return _Type_name_3 - case i == 16: - return _Type_name_4 - case i == 32: - return _Type_name_5 - case i == 64: - return _Type_name_6 - case i == 128: - return _Type_name_7 - case i == 256: - return _Type_name_8 - default: - return fmt.Sprintf("Type(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go deleted file mode 100644 index d6ddaec..0000000 --- a/vendor/github.com/hashicorp/hil/ast/unknown.go +++ /dev/null @@ -1,30 +0,0 @@ -package ast - -// IsUnknown reports whether a variable is unknown or contains any value -// that is unknown. This will recurse into lists and maps and so on.
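The Scope, Variable, Function, and BasicScope types deleted above are the entire binding surface of HIL: callers hand the engine maps of named values and callbacks. A hedged sketch of wiring them together (the "greet" function is illustrative, not part of the library):

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

func main() {
	scope := &ast.BasicScope{
		VarMap: map[string]ast.Variable{
			"var.name": {Type: ast.TypeString, Value: "world"},
		},
		FuncMap: map[string]ast.Function{
			// "greet" is a made-up example function.
			"greet": {
				ArgTypes:   []ast.Type{ast.TypeString},
				ReturnType: ast.TypeString,
				Callback: func(args []interface{}) (interface{}, error) {
					return "hello " + args[0].(string), nil
				},
			},
		},
	}

	// Lookups are plain map reads; both are safe even on a nil *BasicScope.
	if v, ok := scope.LookupVar("var.name"); ok {
		fmt.Println(v) // {Variable (TypeString): world}
	}
	if f, ok := scope.LookupFunc("greet"); ok {
		out, _ := f.Callback([]interface{}{"world"})
		fmt.Println(out) // hello world
	}
}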
-func IsUnknown(v Variable) bool { - // If it is unknown itself, return true - if v.Type == TypeUnknown { - return true - } - - // If it is a container type, check the values - switch v.Type { - case TypeList: - for _, el := range v.Value.([]Variable) { - if IsUnknown(el) { - return true - } - } - case TypeMap: - for _, el := range v.Value.(map[string]Variable) { - if IsUnknown(el) { - return true - } - } - default: - } - - // Not a container type, or it survived the above checks - return false -} diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go deleted file mode 100644 index 4c1362d..0000000 --- a/vendor/github.com/hashicorp/hil/ast/variable_access.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -// VariableAccess represents a variable access. -type VariableAccess struct { - Name string - Posx Pos -} - -func (n *VariableAccess) Accept(v Visitor) Node { - return v(n) -} - -func (n *VariableAccess) Pos() Pos { - return n.Posx -} - -func (n *VariableAccess) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *VariableAccess) String() string { - return fmt.Sprintf("Variable(%s)", n.Name) -} - -func (n *VariableAccess) Type(s Scope) (Type, error) { - v, ok := s.LookupVar(n.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name) - } - - return v.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go deleted file mode 100644 index 06bd18d..0000000 --- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go +++ /dev/null @@ -1,63 +0,0 @@ -package ast - -import "fmt" - -func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { - if len(list) == 0 { - return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range list { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "list %q does not have homogenous types. found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} - -func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { - if len(vmap) == 0 { - return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range vmap { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "map %q does not have homogenous types. 
found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go deleted file mode 100644 index 909c788..0000000 --- a/vendor/github.com/hashicorp/hil/builtins.go +++ /dev/null @@ -1,331 +0,0 @@ -package hil - -import ( - "errors" - "strconv" - - "github.com/hashicorp/hil/ast" -) - -// NOTE: All builtins are tested in engine_test.go - -func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { - if scope == nil { - scope = new(ast.BasicScope) - } - if scope.FuncMap == nil { - scope.FuncMap = make(map[string]ast.Function) - } - - // Implicit conversions - scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() - scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() - scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() - scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() - scope.FuncMap["__builtin_IntToString"] = builtinIntToString() - scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() - scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() - scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() - - // Math operations - scope.FuncMap["__builtin_IntMath"] = builtinIntMath() - scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() - scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() - scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() - scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() - scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() - scope.FuncMap["__builtin_Logical"] = builtinLogical() - return scope -} - -func builtinFloatMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeFloat, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(float64) - for _, raw := range args[2:] { - arg := raw.(float64) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - result /= arg - } - } - - return result, nil - }, - } -} - -func builtinIntMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeInt, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(int) - for _, raw := range args[2:] { - arg := raw.(int) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result /= arg - case ast.ArithmeticOpMod: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result = result % arg - } - } - - return result, nil - }, - } -} - -func builtinBoolCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(bool) - rhs := args[2].(bool) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, 
errors.New("invalid comparison operation") - } - }, - } -} - -func builtinFloatCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(float64) - rhs := args[2].(float64) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinIntCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(int) - rhs := args[2].(int) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinStringCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(string) - rhs := args[2].(string) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinLogical() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeBool, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(bool) - for _, raw := range args[2:] { - arg := raw.(bool) - switch op { - case ast.ArithmeticOpLogicalOr: - result = result || arg - case ast.ArithmeticOpLogicalAnd: - result = result && arg - default: - return nil, errors.New("invalid logical operator") - } - } - - return result, nil - }, - } -} - -func builtinFloatToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - return int(args[0].(float64)), nil - }, - } -} - -func builtinFloatToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatFloat( - args[0].(float64), 'g', -1, 64), nil - }, - } -} - -func builtinIntToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - return float64(args[0].(int)), nil - }, - } -} - -func 
builtinIntToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatInt(int64(args[0].(int)), 10), nil - }, - } -} - -func builtinStringToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseInt(args[0].(string), 0, 0) - if err != nil { - return nil, err - } - - return int(v), nil - }, - } -} - -func builtinStringToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseFloat(args[0].(string), 64) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} - -func builtinBoolToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeBool}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatBool(args[0].(bool)), nil - }, - } -} - -func builtinStringToBool() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseBool(args[0].(string)) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go deleted file mode 100644 index 474f505..0000000 --- a/vendor/github.com/hashicorp/hil/check_identifier.go +++ /dev/null @@ -1,88 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// IdentifierCheck is a SemanticChecker that checks that all identifiers -// resolve properly and that the right number of arguments are passed -// to functions.
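The implicit-conversion builtins above are thin wrappers over the standard strconv package, so their corner cases are strconv's corner cases. A standalone sketch of the exact stdlib calls they make:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// __builtin_StringToInt parses with base 0, so 0x and 0 prefixes work.
	i, _ := strconv.ParseInt("0x2a", 0, 0)
	fmt.Println(int(i)) // 42

	// __builtin_FloatToString uses the shortest 'g' representation.
	fmt.Println(strconv.FormatFloat(0.5, 'g', -1, 64)) // 0.5

	// __builtin_StringToBool accepts 1/t/true and 0/f/false variants.
	b, _ := strconv.ParseBool("true")
	fmt.Println(b) // true

	// __builtin_FloatToInt is a plain Go conversion: truncation toward zero.
	f := 3.9
	fmt.Println(int(f)) // 3
}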
-type IdentifierCheck struct { - Scope ast.Scope - - err error - lock sync.Mutex -} - -func (c *IdentifierCheck) Visit(root ast.Node) error { - c.lock.Lock() - defer c.lock.Unlock() - defer c.reset() - root.Accept(c.visit) - return c.err -} - -func (c *IdentifierCheck) visit(raw ast.Node) ast.Node { - if c.err != nil { - return raw - } - - switch n := raw.(type) { - case *ast.Call: - c.visitCall(n) - case *ast.VariableAccess: - c.visitVariableAccess(n) - case *ast.Output: - // Ignore - case *ast.LiteralNode: - // Ignore - default: - // Ignore - } - - // We never do replacement with this visitor - return raw -} - -func (c *IdentifierCheck) visitCall(n *ast.Call) { - // Look up the function in the map - function, ok := c.Scope.LookupFunc(n.Func) - if !ok { - c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func)) - return - } - - // Break up the args into what is variadic and what is required - args := n.Args - if function.Variadic && len(args) > len(function.ArgTypes) { - args = n.Args[:len(function.ArgTypes)] - } - - // Verify the number of arguments - if len(args) != len(function.ArgTypes) { - c.createErr(n, fmt.Sprintf( - "%s: expected %d arguments, got %d", - n.Func, len(function.ArgTypes), len(n.Args))) - return - } -} - -func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) { - // Look up the variable in the map - if _, ok := c.Scope.LookupVar(n.Name); !ok { - c.createErr(n, fmt.Sprintf( - "unknown variable accessed: %s", n.Name)) - return - } -} - -func (c *IdentifierCheck) createErr(n ast.Node, str string) { - c.err = fmt.Errorf("%s: %s", n.Pos(), str) -} - -func (c *IdentifierCheck) reset() { - c.err = nil -} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go deleted file mode 100644 index f16da39..0000000 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ /dev/null @@ -1,668 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// TypeCheck implements ast.Visitor for type checking an AST tree. -// It requires some configuration to look up the type of nodes. -// -// It also optionally will not type error and will insert an implicit -// type conversions for specific types if specified by the Implicit -// field. Note that this is kind of organizationally weird to put into -// this structure but we'd rather do that than duplicate the type checking -// logic multiple times. -type TypeCheck struct { - Scope ast.Scope - - // Implicit is a map of implicit type conversions that we can do, - // and that shouldn't error. The key of the first map is the from type, - // the key of the second map is the to type, and the final string - // value is the function to call (which must be registered in the Scope). - Implicit map[ast.Type]map[ast.Type]string - - // Stack of types. This shouldn't be used directly except by implementations - // of TypeCheckNode. - Stack []ast.Type - - err error - lock sync.Mutex -} - -// TypeCheckNode is the interface that must be implemented by any -// ast.Node that wants to support type-checking. If the type checker -// encounters a node that doesn't implement this, it will error. -type TypeCheckNode interface { - TypeCheck(*TypeCheck) (ast.Node, error) -} - -func (v *TypeCheck) Visit(root ast.Node) error { - v.lock.Lock() - defer v.lock.Unlock() - defer v.reset() - root.Accept(v.visit) - - // If the resulting type is unknown, then just let the whole thing go. 
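IdentifierCheck above is one of the SemanticChecker-compatible visitors that the evaluator (internalEval, later in this patch) always runs before execution. A hedged sketch of invoking it directly against a parsed tree, assuming the vendored hil and hil/ast packages:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	tree, err := hil.Parse("${var.missing}")
	if err != nil {
		panic(err)
	}

	// An empty scope: every variable access must fail the check.
	ic := &hil.IdentifierCheck{Scope: &ast.BasicScope{}}
	if err := ic.Visit(tree); err != nil {
		// Reports the node position plus
		// "unknown variable accessed: var.missing".
		fmt.Println(err)
	}
}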
- if v.err == errExitUnknown { - v.err = nil - } - - return v.err -} - -func (v *TypeCheck) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - var result ast.Node - var err error - switch n := raw.(type) { - case *ast.Arithmetic: - tc := &typeCheckArithmetic{n} - result, err = tc.TypeCheck(v) - case *ast.Call: - tc := &typeCheckCall{n} - result, err = tc.TypeCheck(v) - case *ast.Conditional: - tc := &typeCheckConditional{n} - result, err = tc.TypeCheck(v) - case *ast.Index: - tc := &typeCheckIndex{n} - result, err = tc.TypeCheck(v) - case *ast.Output: - tc := &typeCheckOutput{n} - result, err = tc.TypeCheck(v) - case *ast.LiteralNode: - tc := &typeCheckLiteral{n} - result, err = tc.TypeCheck(v) - case *ast.VariableAccess: - tc := &typeCheckVariableAccess{n} - result, err = tc.TypeCheck(v) - default: - tc, ok := raw.(TypeCheckNode) - if !ok { - err = fmt.Errorf("unknown node for type check: %#v", raw) - break - } - - result, err = tc.TypeCheck(v) - } - - if err != nil { - pos := raw.Pos() - v.err = fmt.Errorf("At column %d, line %d: %s", - pos.Column, pos.Line, err) - } - - return result -} - -type typeCheckArithmetic struct { - n *ast.Arithmetic -} - -func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { - // The arguments are on the stack in reverse order, so pop them off. - exprs := make([]ast.Type, len(tc.n.Exprs)) - for i, _ := range tc.n.Exprs { - exprs[len(tc.n.Exprs)-1-i] = v.StackPop() - } - - // If any operand is unknown then our result is automatically unknown - for _, ty := range exprs { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - switch tc.n.Op { - case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: - return tc.checkLogical(v, exprs) - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, - ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, - ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: - return tc.checkComparison(v, exprs) - default: - return tc.checkNumeric(v, exprs) - } - -} - -func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - // Determine the resulting type we want. We do this by going over - // every expression until we find one with a type we recognize. - // We do this because the first expr might be a string ("var.foo") - // and we need to know what to implicit to. - mathFunc := "__builtin_IntMath" - mathType := ast.TypeInt - for _, v := range exprs { - // We assume int math but if we find ANY float, the entire - // expression turns into floating point math. - if v == ast.TypeFloat { - mathFunc = "__builtin_FloatMath" - mathType = v - break - } - } - - // Verify the args - for i, arg := range exprs { - if arg != mathType { - cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, mathType, arg) - } - } - - // Modulo doesn't work for floats - if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod { - return nil, fmt.Errorf("modulo cannot be used with floats") - } - - // Return type - v.StackPush(mathType) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: mathFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - if len(exprs) != 2 { - // This should never happen, because the parser never produces - // nodes that violate this. - return nil, fmt.Errorf( - "comparison operators must have exactly two operands", - ) - } - - // The first operand always dictates the type for a comparison. - compareFunc := "" - compareType := exprs[0] - switch compareType { - case ast.TypeBool: - compareFunc = "__builtin_BoolCompare" - case ast.TypeFloat: - compareFunc = "__builtin_FloatCompare" - case ast.TypeInt: - compareFunc = "__builtin_IntCompare" - case ast.TypeString: - compareFunc = "__builtin_StringCompare" - default: - return nil, fmt.Errorf( - "comparison operators apply only to bool, float, int, and string", - ) - } - - // For non-equality comparisons, we will do implicit conversions to - // integer types if possible. In this case, we need to go through and - // determine the type of comparison we're doing to enable the implicit - // conversion. - if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { - compareFunc = "__builtin_IntCompare" - compareType = ast.TypeInt - for _, expr := range exprs { - if expr == ast.TypeFloat { - compareFunc = "__builtin_FloatCompare" - compareType = ast.TypeFloat - break - } - } - } - - // Verify (and possibly, convert) the args - for i, arg := range exprs { - if arg != compareType { - cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, compareType, arg, - ) - } - } - - // Only ints and floats can have the <, >, <= and >= operators applied - switch tc.n.Op { - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: - // anything goes - default: - switch compareType { - case ast.TypeFloat, ast.TypeInt: - // fine - default: - return nil, fmt.Errorf( - "<, >, <= and >= may apply only to int and float values", - ) - } - } - - // Comparison operators always return bool - v.StackPush(ast.TypeBool) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
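Both checkNumeric and checkComparison finish the same way: the Arithmetic node is replaced by an ast.Call whose first argument is the operator as an int-typed literal, followed by the (possibly converted) operands. A hedged sketch of the tree checkNumeric would build for "${1 + 2.5}", assuming the int-to-float implicit conversion registered by internalEval later in this patch:

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

func main() {
	pos := ast.Pos{Line: 1, Column: 3}

	// One float operand switches the whole expression to float math,
	// and the int operand gets wrapped in the registered conversion call.
	rewritten := &ast.Call{
		Func: "__builtin_FloatMath",
		Args: []ast.Node{
			&ast.LiteralNode{Value: ast.ArithmeticOpAdd, Typex: ast.TypeInt, Posx: pos},
			&ast.Call{
				Func: "__builtin_IntToFloat",
				Args: []ast.Node{&ast.LiteralNode{Value: 1, Typex: ast.TypeInt, Posx: pos}},
				Posx: pos,
			},
			&ast.LiteralNode{Value: 2.5, Typex: ast.TypeFloat, Posx: pos},
		},
		Posx: pos,
	}
	fmt.Println(rewritten.Func, len(rewritten.Args)) // __builtin_FloatMath 3
}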
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: compareFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - for i, t := range exprs { - if t != ast.TypeBool { - cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) - if cn == nil { - return nil, fmt.Errorf( - "logical operators require boolean operands, not %s", - t, - ) - } - tc.n.Exprs[i] = cn - } - } - - // Return type is always boolean - v.StackPush(ast.TypeBool) - - // Arithmetic nodes are replaced with a call to a built-in function - args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: "__builtin_Logical", - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -type typeCheckCall struct { - n *ast.Call -} - -func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the function in the map - function, ok := v.Scope.LookupFunc(tc.n.Func) - if !ok { - return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]ast.Type, len(tc.n.Args)) - for i, _ := range tc.n.Args { - args[len(tc.n.Args)-1-i] = v.StackPop() - } - - // Verify the args - for i, expected := range function.ArgTypes { - if expected == ast.TypeAny { - continue - } - - if args[i] == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if args[i] != expected { - cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) - if cn != nil { - tc.n.Args[i] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, i+1, expected.Printable(), args[i].Printable()) - } - } - - // If we're variadic, then verify the types there - if function.Variadic && function.VariadicType != ast.TypeAny { - args = args[len(function.ArgTypes):] - for i, t := range args { - if t == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if t != function.VariadicType { - realI := i + len(function.ArgTypes) - cn := v.ImplicitConversion( - t, function.VariadicType, tc.n.Args[realI]) - if cn != nil { - tc.n.Args[realI] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, realI, - function.VariadicType.Printable(), t.Printable()) - } - } - } - - // Return type - v.StackPush(function.ReturnType) - - return tc.n, nil -} - -type typeCheckConditional struct { - n *ast.Conditional -} - -func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { - // On the stack we have the types of the condition, true and false - // expressions, but they are in reverse order. 
- falseType := v.StackPop() - trueType := v.StackPop() - condType := v.StackPop() - - if condType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if condType != ast.TypeBool { - cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) - if cn == nil { - return nil, fmt.Errorf( - "condition must be type bool, not %s", condType.Printable(), - ) - } - tc.n.CondExpr = cn - } - - // The types of the true and false expression must match - if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { - - // Since passing around stringified versions of other types is - // common, we pragmatically allow the false expression to dictate - // the result type when the true expression is a string. - if trueType == ast.TypeString { - cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.TrueExpr = cn - trueType = falseType - } else { - cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.FalseExpr = cn - falseType = trueType - } - } - - // Currently list and map types cannot be used, because we cannot - // generally assert that their element types are consistent. - // Such support might be added later, either by improving the type - // system or restricting usage to only variable and literal expressions, - // but for now this is simply prohibited because it doesn't seem to - // be a common enough case to be worth the complexity. - switch trueType { - case ast.TypeList: - return nil, fmt.Errorf( - "conditional operator cannot be used with list values", - ) - case ast.TypeMap: - return nil, fmt.Errorf( - "conditional operator cannot be used with map values", - ) - } - - // Result type (guaranteed to also match falseType due to the above) - if trueType == ast.TypeUnknown { - // falseType may also be unknown, but that's okay because two - // unknowns means our result is unknown anyway. 
- v.StackPush(falseType) - } else { - v.StackPush(trueType) - } - - return tc.n, nil -} - -type typeCheckOutput struct { - n *ast.Output -} - -func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { - n := tc.n - types := make([]ast.Type, len(n.Exprs)) - for i, _ := range n.Exprs { - types[len(n.Exprs)-1-i] = v.StackPop() - } - - for _, ty := range types { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - // If there is only one argument and it is a list, we evaluate to a list - if len(types) == 1 { - switch t := types[0]; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - v.StackPush(t) - return n, nil - } - } - - // Otherwise, all concat args must be strings, so validate that - resultType := ast.TypeString - for i, t := range types { - - if t == ast.TypeUnknown { - resultType = ast.TypeUnknown - continue - } - - if t != ast.TypeString { - cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) - if cn != nil { - n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) - } - } - - // This always results in type string, unless there are unknowns - v.StackPush(resultType) - - return n, nil -} - -type typeCheckLiteral struct { - n *ast.LiteralNode -} - -func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { - v.StackPush(tc.n.Typex) - return tc.n, nil -} - -type typeCheckVariableAccess struct { - n *ast.VariableAccess -} - -func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the variable in the map - variable, ok := v.Scope.LookupVar(tc.n.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", tc.n.Name) - } - - // Add the type to the stack - v.StackPush(variable.Type) - - return tc.n, nil -} - -type typeCheckIndex struct { - n *ast.Index -} - -func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { - keyType := v.StackPop() - targetType := v.StackPop() - - if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - // Ensure we have a VariableAccess as the target - varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) - if !ok { - return nil, fmt.Errorf( - "target of an index must be a VariableAccess node, was %T", tc.n.Target) - } - - // Get the variable - variable, ok := v.Scope.LookupVar(varAccessNode.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", varAccessNode.Name) - } - - switch targetType { - case ast.TypeList: - if keyType != ast.TypeInt { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be an int, was %s", keyType) - } - } - - valType, err := ast.VariableListElementTypesAreHomogenous( - varAccessNode.Name, variable.Value.([]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - case ast.TypeMap: - if keyType != ast.TypeString { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be a string, was %s", keyType) - } - } - - valType, err := ast.VariableMapValueTypesAreHomogenous( - varAccessNode.Name, variable.Value.(map[string]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - default: - return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", 
variable.Type) - } -} - -func (v *TypeCheck) ImplicitConversion( - actual ast.Type, expected ast.Type, n ast.Node) ast.Node { - if v.Implicit == nil { - return nil - } - - fromMap, ok := v.Implicit[actual] - if !ok { - return nil - } - - toFunc, ok := fromMap[expected] - if !ok { - return nil - } - - return &ast.Call{ - Func: toFunc, - Args: []ast.Node{n}, - Posx: n.Pos(), - } -} - -func (v *TypeCheck) reset() { - v.Stack = nil - v.err = nil -} - -func (v *TypeCheck) StackPush(t ast.Type) { - v.Stack = append(v.Stack, t) -} - -func (v *TypeCheck) StackPop() ast.Type { - var x ast.Type - x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] - return x -} - -func (v *TypeCheck) StackPeek() ast.Type { - if len(v.Stack) == 0 { - return ast.TypeInvalid - } - - return v.Stack[len(v.Stack)-1] -} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go deleted file mode 100644 index f2024d0..0000000 --- a/vendor/github.com/hashicorp/hil/convert.go +++ /dev/null @@ -1,159 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/mapstructure" -) - -// UnknownValue is a sentinel value that can be used to denote -// that a value of a variable (or map element, list element, etc.) -// is unknown. This will always have the type ast.TypeUnknown. -const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -var hilMapstructureDecodeHookSlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookMap map[string]interface{} - -// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode -// but has a DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
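The convert.go helpers whose bodies follow use mapstructure's weak decoding to map arbitrary Go values onto the ast.Variable shape, with the UnknownValue sentinel recognized on the way in. A minimal round-trip sketch against that API:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	// The unknown sentinel string becomes a TypeUnknown variable.
	u, _ := hil.InterfaceToVariable(hil.UnknownValue)
	fmt.Println(u.Type) // TypeUnknown

	// Native Go collections become ast.Variable trees.
	v, err := hil.InterfaceToVariable(map[string]interface{}{"a": "1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Type) // TypeMap

	// And VariableToInterface converts back to plain Go values.
	back, err := hil.VariableToInterface(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(back) // map[a:1]
}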
-func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if inputVariable, ok := input.(ast.Variable); ok { - return inputVariable, nil - } - - var stringVal string - if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { - // Special case the unknown value to turn into "unknown" - if stringVal == UnknownValue { - return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil - } - - // Otherwise return the string value - return ast.Variable{ - Type: ast.TypeString, - Value: stringVal, - }, nil - } - - var mapVal map[string]interface{} - if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { - elements := make(map[string]ast.Variable) - for i, element := range mapVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeMap, - Value: elements, - }, nil - } - - var sliceVal []interface{} - if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { - elements := make([]ast.Variable, len(sliceVal)) - for i, element := range sliceVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeList, - Value: elements, - }, nil - } - - return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) -} - -func VariableToInterface(input ast.Variable) (interface{}, error) { - if input.Type == ast.TypeString { - if inputStr, ok := input.Value.(string); ok { - return inputStr, nil - } else { - return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") - } - } - - if input.Type == ast.TypeList { - inputList, ok := input.Value.([]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") - } - - result := make([]interface{}, 0) - if len(inputList) == 0 { - return result, nil - } - - for _, element := range inputList { - if convertedElement, err := VariableToInterface(element); err == nil { - result = append(result, convertedElement) - } else { - return nil, err - } - } - - return result, nil - } - - if input.Type == ast.TypeMap { - inputMap, ok := input.Value.(map[string]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") - } - - result := make(map[string]interface{}, 0) - if len(inputMap) == 0 { - return result, nil - } - - for key, value := range inputMap { - if convertedValue, err := VariableToInterface(value); err == nil { - result[key] = convertedValue - } else { - return nil, err - } - } - - return result, nil - } - - return nil, 
fmt.Errorf("unknown input type: %s", input.Type) -} diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go deleted file mode 100644 index 2782076..0000000 --- a/vendor/github.com/hashicorp/hil/eval.go +++ /dev/null @@ -1,472 +0,0 @@ -package hil - -import ( - "bytes" - "errors" - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// EvalConfig is the configuration for evaluating. -type EvalConfig struct { - // GlobalScope is the global scope of execution for evaluation. - GlobalScope *ast.BasicScope - - // SemanticChecks is a list of additional semantic checks that will be run - // on the tree prior to evaluating it. The type checker, identifier checker, - // etc. will be run before these automatically. - SemanticChecks []SemanticChecker -} - -// SemanticChecker is the type that must be implemented to do a -// semantic check on an AST tree. This will be called with the root node. -type SemanticChecker func(ast.Node) error - -// EvaluationResult is a struct returned from the hil.Eval function, -// representing the result of an interpolation. Results are returned in their -// "natural" Go structure rather than in terms of the HIL AST. For the types -// currently implemented, this means that the Value field can be interpreted as -// the following Go types: -// TypeInvalid: undefined -// TypeString: string -// TypeList: []interface{} -// TypeMap: map[string]interface{} -// TypBool: bool -type EvaluationResult struct { - Type EvalType - Value interface{} -} - -// InvalidResult is a structure representing the result of a HIL interpolation -// which has invalid syntax, missing variables, or some other type of error. -// The error is described out of band in the accompanying error return value. -var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil} - -// errExitUnknown is an internal error that when returned means the result -// is an unknown value. We use this for early exit. -var errExitUnknown = errors.New("unknown value") - -func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { - output, outputType, err := internalEval(root, config) - if err != nil { - return InvalidResult, err - } - - // If the result contains any nested unknowns then the result as a whole - // is unknown, so that callers only have to deal with "entirely known" - // or "entirely unknown" as outcomes. - if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { - outputType = ast.TypeUnknown - output = UnknownValue - } - - switch outputType { - case ast.TypeList: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeList, - Value: output, - }) - return EvaluationResult{ - Type: TypeList, - Value: val, - }, err - case ast.TypeMap: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeMap, - Value: output, - }) - return EvaluationResult{ - Type: TypeMap, - Value: val, - }, err - case ast.TypeString: - return EvaluationResult{ - Type: TypeString, - Value: output, - }, nil - case ast.TypeBool: - return EvaluationResult{ - Type: TypeBool, - Value: output, - }, nil - case ast.TypeUnknown: - return EvaluationResult{ - Type: TypeUnknown, - Value: UnknownValue, - }, nil - default: - return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) - } -} - -// Eval evaluates the given AST tree and returns its output value, the type -// of the output, and any error that occurred. 
-func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { - // Copy the scope so we can add our builtins - if config == nil { - config = new(EvalConfig) - } - scope := registerBuiltins(config.GlobalScope) - implicitMap := map[ast.Type]map[ast.Type]string{ - ast.TypeFloat: { - ast.TypeInt: "__builtin_FloatToInt", - ast.TypeString: "__builtin_FloatToString", - }, - ast.TypeInt: { - ast.TypeFloat: "__builtin_IntToFloat", - ast.TypeString: "__builtin_IntToString", - }, - ast.TypeString: { - ast.TypeInt: "__builtin_StringToInt", - ast.TypeFloat: "__builtin_StringToFloat", - ast.TypeBool: "__builtin_StringToBool", - }, - ast.TypeBool: { - ast.TypeString: "__builtin_BoolToString", - }, - } - - // Build our own semantic checks that we always run - tv := &TypeCheck{Scope: scope, Implicit: implicitMap} - ic := &IdentifierCheck{Scope: scope} - - // Build up the semantic checks for execution - checks := make( - []SemanticChecker, - len(config.SemanticChecks), - len(config.SemanticChecks)+2) - copy(checks, config.SemanticChecks) - checks = append(checks, ic.Visit) - checks = append(checks, tv.Visit) - - // Run the semantic checks - for _, check := range checks { - if err := check(root); err != nil { - return nil, ast.TypeInvalid, err - } - } - - // Execute - v := &evalVisitor{Scope: scope} - return v.Visit(root) -} - -// EvalNode is the interface that must be implemented by any ast.Node -// to support evaluation. This will be called in visitor pattern order. -// The result of each call to Eval is automatically pushed onto the -// stack as a LiteralNode. Pop elements off the stack to get child -// values. -type EvalNode interface { - Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) -} - -type evalVisitor struct { - Scope ast.Scope - Stack ast.Stack - - err error - lock sync.Mutex -} - -func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { - // Run the actual visitor pattern - root.Accept(v.visit) - - // Get our result and clear out everything else - var result *ast.LiteralNode - if v.Stack.Len() > 0 { - result = v.Stack.Pop().(*ast.LiteralNode) - } else { - result = new(ast.LiteralNode) - } - resultErr := v.err - if resultErr == errExitUnknown { - // This means the return value is unknown and we used the error - // as an early exit mechanism. Reset since the value on the stack - // should be the unknown value. - resultErr = nil - } - - // Clear everything else so we aren't just dangling - v.Stack.Reset() - v.err = nil - - t, err := result.Type(v.Scope) - if err != nil { - return nil, ast.TypeInvalid, err - } - - return result.Value, t, resultErr -} - -func (v *evalVisitor) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - en, err := evalNode(raw) - if err != nil { - v.err = err - return raw - } - - out, outType, err := en.Eval(v.Scope, &v.Stack) - if err != nil { - v.err = err - return raw - } - - v.Stack.Push(&ast.LiteralNode{ - Value: out, - Typex: outType, - }) - - if outType == ast.TypeUnknown { - // Halt immediately - v.err = errExitUnknown - return raw - } - - return raw -} - -// evalNode is a private function that returns an EvalNode for built-in -// types as well as any other EvalNode implementations. 
-func evalNode(raw ast.Node) (EvalNode, error) { - switch n := raw.(type) { - case *ast.Index: - return &evalIndex{n}, nil - case *ast.Call: - return &evalCall{n}, nil - case *ast.Conditional: - return &evalConditional{n}, nil - case *ast.Output: - return &evalOutput{n}, nil - case *ast.LiteralNode: - return &evalLiteralNode{n}, nil - case *ast.VariableAccess: - return &evalVariableAccess{n}, nil - default: - en, ok := n.(EvalNode) - if !ok { - return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw) - } - - return en, nil - } -} - -type evalCall struct{ *ast.Call } - -func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // Look up the function in the map - function, ok := s.LookupFunc(v.Func) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown function called: %s", v.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]interface{}, len(v.Args)) - for i, _ := range v.Args { - node := stack.Pop().(*ast.LiteralNode) - if node.IsUnknown() { - // If any arguments are unknown then the result is automatically unknown - return UnknownValue, ast.TypeUnknown, nil - } - args[len(v.Args)-1-i] = node.Value - } - - // Call the function - result, err := function.Callback(args) - if err != nil { - return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err) - } - - return result, function.ReturnType, nil -} - -type evalConditional struct{ *ast.Conditional } - -func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // On the stack we have literal nodes representing the resulting values - // of the condition, true and false expressions, but they are in reverse - // order. - falseLit := stack.Pop().(*ast.LiteralNode) - trueLit := stack.Pop().(*ast.LiteralNode) - condLit := stack.Pop().(*ast.LiteralNode) - - if condLit.IsUnknown() { - // If our conditional is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - if condLit.Value.(bool) { - return trueLit.Value, trueLit.Typex, nil - } else { - return falseLit.Value, trueLit.Typex, nil - } -} - -type evalIndex struct{ *ast.Index } - -func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - key := stack.Pop().(*ast.LiteralNode) - target := stack.Pop().(*ast.LiteralNode) - - variableName := v.Index.Target.(*ast.VariableAccess).Name - - if key.IsUnknown() { - // If our key is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - // For target, we'll accept collections containing unknown values but - // we still need to catch when the collection itself is unknown, shallowly. 
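evalCall above short-circuits as soon as any popped argument is unknown, which is what makes unknowns "contagious" through entire expressions. A sketch of the observable behavior, assuming the vendored hil package (var.maybe is illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	config := &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"var.maybe": {Type: ast.TypeUnknown, Value: hil.UnknownValue},
			},
		},
	}

	tree, err := hil.Parse("prefix-${var.maybe}-suffix")
	if err != nil {
		panic(err)
	}

	result, err := hil.Eval(tree, config)
	if err != nil {
		panic(err)
	}

	// One unknown part makes the entire result unknown.
	fmt.Println(result.Type == hil.TypeUnknown)   // true
	fmt.Println(result.Value == hil.UnknownValue) // true
}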
- if target.Typex == ast.TypeUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - switch target.Typex { - case ast.TypeList: - return v.evalListIndex(variableName, target.Value, key.Value) - case ast.TypeMap: - return v.evalMapIndex(variableName, target.Value, key.Value) - default: - return nil, ast.TypeInvalid, fmt.Errorf( - "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", - variableName, target.Typex) - } -} - -func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a list and key is an int - list, ok := target.([]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to []Variable, is: %T", target) - } - - keyInt, ok := key.(int) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to int, is: %T", key) - } - - if len(list) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("list is empty") - } - - if keyInt < 0 || len(list) < keyInt+1 { - return nil, ast.TypeInvalid, fmt.Errorf( - "index %d out of range for list %s (max %d)", - keyInt, variableName, len(list)) - } - - returnVal := list[keyInt].Value - returnType := list[keyInt].Type - return returnVal, returnType, nil -} - -func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a map and key is a string - vmap, ok := target.(map[string]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to map[string]Variable, is: %T", target) - } - - keyString, ok := key.(string) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to string, is: %T", key) - } - - if len(vmap) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("map is empty") - } - - value, ok := vmap[keyString] - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "key %q does not exist in map %s", keyString, variableName) - } - - return value.Value, value.Type, nil -} - -type evalOutput struct{ *ast.Output } - -func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // The expressions should all be on the stack in reverse - // order. So pop them off, reverse their order, and concatenate. - nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) - haveUnknown := false - for range v.Exprs { - n := stack.Pop().(*ast.LiteralNode) - nodes = append(nodes, n) - - // If we have any unknowns then the whole result is unknown - // (we must deal with this first, because the type checker can - // skip type conversions in the presence of unknowns, and thus - // any of our other nodes may be incorrectly typed.) 
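The index evaluators above do their own bounds and key checking at evaluation time, since the type checker only validated the key's type. A sketch of both the success and the failure path for a list (var.list is illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	config := &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"var.list": {
					Type: ast.TypeList,
					Value: []ast.Variable{
						{Type: ast.TypeString, Value: "a"},
						{Type: ast.TypeString, Value: "b"},
					},
				},
			},
		},
	}

	tree, _ := hil.Parse("${var.list[1]}")
	result, err := hil.Eval(tree, config)
	fmt.Println(result.Value, err) // b <nil>

	tree, _ = hil.Parse("${var.list[5]}")
	_, err = hil.Eval(tree, config)
	fmt.Println(err) // index 5 out of range for list var.list (max 2)
}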
- if n.IsUnknown() { - haveUnknown = true - } - } - - if haveUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - // Special case the single list and map - if len(nodes) == 1 { - switch t := nodes[0].Typex; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - fallthrough - case ast.TypeUnknown: - return nodes[0].Value, t, nil - } - } - - // Otherwise concatenate the strings - var buf bytes.Buffer - for i := len(nodes) - 1; i >= 0; i-- { - if nodes[i].Typex != ast.TypeString { - return nil, ast.TypeInvalid, fmt.Errorf( - "invalid output with %s value at index %d: %#v", - nodes[i].Typex, - i, - nodes[i].Value, - ) - } - buf.WriteString(nodes[i].Value.(string)) - } - - return buf.String(), ast.TypeString, nil -} - -type evalLiteralNode struct{ *ast.LiteralNode } - -func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { - return v.Value, v.Typex, nil -} - -type evalVariableAccess struct{ *ast.VariableAccess } - -func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { - // Look up the variable in the map - variable, ok := scope.LookupVar(v.Name) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown variable accessed: %s", v.Name) - } - - return variable.Value, variable.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go deleted file mode 100644 index 6946ecd..0000000 --- a/vendor/github.com/hashicorp/hil/eval_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package hil - -//go:generate stringer -type=EvalType eval_type.go - -// EvalType represents the type of the output returned from a HIL -// evaluation. -type EvalType uint32 - -const ( - TypeInvalid EvalType = 0 - TypeString EvalType = 1 << iota - TypeBool - TypeList - TypeMap - TypeUnknown -) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go deleted file mode 100644 index b107ddd..0000000 --- a/vendor/github.com/hashicorp/hil/evaltype_string.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT - -package hil - -import "fmt" - -const ( - _EvalType_name_0 = "TypeInvalid" - _EvalType_name_1 = "TypeString" - _EvalType_name_2 = "TypeBool" - _EvalType_name_3 = "TypeList" - _EvalType_name_4 = "TypeMap" - _EvalType_name_5 = "TypeUnknown" -) - -var ( - _EvalType_index_0 = [...]uint8{0, 11} - _EvalType_index_1 = [...]uint8{0, 10} - _EvalType_index_2 = [...]uint8{0, 8} - _EvalType_index_3 = [...]uint8{0, 8} - _EvalType_index_4 = [...]uint8{0, 7} - _EvalType_index_5 = [...]uint8{0, 11} -) - -func (i EvalType) String() string { - switch { - case i == 0: - return _EvalType_name_0 - case i == 2: - return _EvalType_name_1 - case i == 4: - return _EvalType_name_2 - case i == 8: - return _EvalType_name_3 - case i == 16: - return _EvalType_name_4 - case i == 32: - return _EvalType_name_5 - default: - return fmt.Sprintf("EvalType(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go deleted file mode 100644 index ecbe1fd..0000000 --- a/vendor/github.com/hashicorp/hil/parse.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/parser" - "github.com/hashicorp/hil/scanner" -) - -// Parse parses the given program and returns an executable AST tree. 
-// -// Syntax errors are returned with error having the dynamic type -// *parser.ParseError, which gives the caller access to the source position -// where the error was found, which allows (for example) combining it with -// a known source filename to add context to the error message. -func Parse(v string) (ast.Node, error) { - return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) -} - -// ParseWithPosition is like Parse except that it overrides the source -// row and column position of the first character in the string, which should -// be 1-based. -// -// This can be used when HIL is embedded in another language and the outer -// parser knows the row and column where the HIL expression started within -// the overall source file. -func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { - ch := scanner.Scan(v, pos) - return parser.Parse(ch) -} diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go deleted file mode 100644 index 2e013e0..0000000 --- a/vendor/github.com/hashicorp/hil/parser/binary_op.go +++ /dev/null @@ -1,45 +0,0 @@ -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -var binaryOps []map[scanner.TokenType]ast.ArithmeticOp - -func init() { - // This operation table maps from the operator's scanner token type - // to the AST arithmetic operation. All expressions produced from - // binary operators are *ast.Arithmetic nodes. - // - // Binary operator groups are listed in order of precedence, with - // the *lowest* precedence first. Operators within the same group - // have left-to-right associativity. - binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ - { - scanner.OR: ast.ArithmeticOpLogicalOr, - }, - { - scanner.AND: ast.ArithmeticOpLogicalAnd, - }, - { - scanner.EQUAL: ast.ArithmeticOpEqual, - scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, - }, - { - scanner.GT: ast.ArithmeticOpGreaterThan, - scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, - scanner.LT: ast.ArithmeticOpLessThan, - scanner.LTE: ast.ArithmeticOpLessThanOrEqual, - }, - { - scanner.PLUS: ast.ArithmeticOpAdd, - scanner.MINUS: ast.ArithmeticOpSub, - }, - { - scanner.STAR: ast.ArithmeticOpMul, - scanner.SLASH: ast.ArithmeticOpDiv, - scanner.PERCENT: ast.ArithmeticOpMod, - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go deleted file mode 100644 index bacd696..0000000 --- a/vendor/github.com/hashicorp/hil/parser/error.go +++ /dev/null @@ -1,38 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -type ParseError struct { - Message string - Pos ast.Pos -} - -func Errorf(pos ast.Pos, format string, args ...interface{}) error { - return &ParseError{ - Message: fmt.Sprintf(format, args...), - Pos: pos, - } -} - -// TokenErrorf is a convenient wrapper around Errorf that uses the -// position of the given token. -func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { - return Errorf(token.Pos, format, args...) 
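binary_op.go above encodes precedence as ordered groups, lowest first, and parse.go lets embedders control source positions. A short sketch combining the two (the position values are arbitrary):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	// Report positions as if the expression started at line 10, column 5
	// of some host file.
	tree, err := hil.ParseWithPosition("${1 + 2 * 3}", ast.Pos{Line: 10, Column: 5})
	if err != nil {
		panic(err)
	}

	// The precedence table makes * bind tighter than +, so this
	// evaluates as 1 + (2 * 3).
	result, err := hil.Eval(tree, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(result.Value) // "7" -- ints are implicitly stringified at output
}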
-} - -func ExpectationError(wanted string, got *scanner.Token) error { - return TokenErrorf(got, "expected %s but found %s", wanted, got) -} - -func (e *ParseError) Error() string { - return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) -} - -func (e *ParseError) String() string { - return e.Error() -} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go deleted file mode 100644 index de954f3..0000000 --- a/vendor/github.com/hashicorp/hil/parser/fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build gofuzz - -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -// This is a fuzz testing function designed to be used with go-fuzz: -// https://github.com/dvyukov/go-fuzz -// -// It's not included in a normal build due to the gofuzz build tag above. -// -// There are some input files that you can use as a seed corpus for go-fuzz -// in the directory ./fuzz-corpus . - -func Fuzz(data []byte) int { - str := string(data) - - ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) - _, err := Parse(ch) - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go deleted file mode 100644 index 376f1c4..0000000 --- a/vendor/github.com/hashicorp/hil/parser/parser.go +++ /dev/null @@ -1,522 +0,0 @@ -package parser - -import ( - "strconv" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -func Parse(ch <-chan *scanner.Token) (ast.Node, error) { - peeker := scanner.NewPeeker(ch) - parser := &parser{peeker} - output, err := parser.ParseTopLevel() - peeker.Close() - return output, err -} - -type parser struct { - peeker *scanner.Peeker -} - -func (p *parser) ParseTopLevel() (ast.Node, error) { - return p.parseInterpolationSeq(false) -} - -func (p *parser) ParseQuoted() (ast.Node, error) { - return p.parseInterpolationSeq(true) -} - -// parseInterpolationSeq parses either the top-level sequence of literals -// and interpolation expressions or a similar sequence within a quoted -// string inside an interpolation expression. The latter case is requested -// by setting 'quoted' to true. -func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { - literalType := scanner.LITERAL - endType := scanner.EOF - if quoted { - // exceptions for quoted sequences - literalType = scanner.STRING - endType = scanner.CQUOTE - } - - startPos := p.peeker.Peek().Pos - - if quoted { - tok := p.peeker.Read() - if tok.Type != scanner.OQUOTE { - return nil, ExpectationError("open quote", tok) - } - } - - var exprs []ast.Node - for { - tok := p.peeker.Read() - - if tok.Type == endType { - break - } - - switch tok.Type { - case literalType: - val, err := p.parseStringToken(tok) - if err != nil { - return nil, err - } - exprs = append(exprs, &ast.LiteralNode{ - Value: val, - Typex: ast.TypeString, - Posx: tok.Pos, - }) - case scanner.BEGIN: - expr, err := p.ParseInterpolation() - if err != nil { - return nil, err - } - exprs = append(exprs, expr) - default: - return nil, ExpectationError(`"${"`, tok) - } - } - - if len(exprs) == 0 { - // If we have no parts at all then the input must've - // been an empty string. 
- exprs = append(exprs, &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: startPos, - }) - } - - // As a special case, if our "Output" contains only one expression - // and it's a literal string then we'll hoist it up to be our - // direct return value, so callers can easily recognize a string - // that has no interpolations at all. - if len(exprs) == 1 { - if lit, ok := exprs[0].(*ast.LiteralNode); ok { - if lit.Typex == ast.TypeString { - return lit, nil - } - } - } - - return &ast.Output{ - Exprs: exprs, - Posx: startPos, - }, nil -} - -// parseStringToken takes a token of either LITERAL or STRING type and -// returns the interpreted string, after processing any relevant -// escape sequences. -func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { - var backslashes bool - switch tok.Type { - case scanner.LITERAL: - backslashes = false - case scanner.STRING: - backslashes = true - default: - panic("unsupported string token type") - } - - raw := []byte(tok.Content) - buf := make([]byte, 0, len(raw)) - - for i := 0; i < len(raw); i++ { - b := raw[i] - more := len(raw) > (i + 1) - - if b == '$' { - if more && raw[i+1] == '$' { - // skip over the second dollar sign - i++ - } - } else if backslashes && b == '\\' { - if !more { - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `unfinished backslash escape sequence`, - ) - } - escapeType := raw[i+1] - switch escapeType { - case '\\': - // skip over the second slash - i++ - case 'n': - b = '\n' - i++ - case '"': - b = '"' - i++ - default: - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `invalid backslash escape sequence`, - ) - } - } - - buf = append(buf, b) - } - - return string(buf), nil -} - -func (p *parser) ParseInterpolation() (ast.Node, error) { - // By the time we're called, we're already "inside" the ${ sequence - // because the caller consumed the ${ token. - - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - - err = p.requireTokenType(scanner.END, `"}"`) - if err != nil { - return nil, err - } - - return expr, nil -} - -func (p *parser) ParseExpression() (ast.Node, error) { - return p.parseTernaryCond() -} - -func (p *parser) parseTernaryCond() (ast.Node, error) { - // The ternary condition operator (.. ? .. : ..) behaves somewhat - // like a binary operator except that the "operator" is itself - // an expression enclosed in two punctuation characters. - // The middle expression is parsed as if the ? and : symbols - // were parentheses. The "rhs" (the "false expression") is then - // treated right-associatively so it behaves similarly to the - // middle in terms of precedence. 
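The escape handling in parseStringToken above is easiest to see from the caller's side: "$$" collapses to a literal "$", and the single-literal hoisting special case means such input comes back as a plain string node. A small illustration, not part of this patch, assuming the vendored hil and ast packages:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	root, err := hil.Parse(`foo-$${bar}`)
	if err != nil {
		panic(err)
	}
	// No interpolation remains after unescaping, so the parser hoists
	// the single literal out of the ast.Output wrapper.
	if lit, ok := root.(*ast.LiteralNode); ok {
		fmt.Println(lit.Value) // foo-${bar}
	}
}
```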
- - startPos := p.peeker.Peek().Pos - - var cond, trueExpr, falseExpr ast.Node - var err error - - cond, err = p.parseBinaryOps(binaryOps) - if err != nil { - return nil, err - } - - next := p.peeker.Peek() - if next.Type != scanner.QUESTION { - return cond, nil - } - - p.peeker.Read() // eat question mark - - trueExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - colon := p.peeker.Read() - if colon.Type != scanner.COLON { - return nil, ExpectationError(":", colon) - } - - falseExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - return &ast.Conditional{ - CondExpr: cond, - TrueExpr: trueExpr, - FalseExpr: falseExpr, - Posx: startPos, - }, nil -} - -// parseBinaryOps calls itself recursively to work through all of the -// operator precedence groups, and then eventually calls ParseExpressionTerm -// for each operand. -func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { - if len(ops) == 0 { - // We've run out of operators, so now we'll just try to parse a term. - return p.ParseExpressionTerm() - } - - thisLevel := ops[0] - remaining := ops[1:] - - startPos := p.peeker.Peek().Pos - - var lhs, rhs ast.Node - operator := ast.ArithmeticOpInvalid - var err error - - // parse a term that might be the first operand of a binary - // expression or it might just be a standalone term, but - // we won't know until we've parsed it and can look ahead - // to see if there's an operator token. - lhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - - // We'll keep eating up arithmetic operators until we run - // out, so that operators with the same precedence will combine in a - // left-associative manner: - // a+b+c => (a+b)+c, not a+(b+c) - // - // Should we later want to have right-associative operators, a way - // to achieve that would be to call back up to ParseExpression here - // instead of iteratively parsing only the remaining operators. - for { - next := p.peeker.Peek() - var newOperator ast.ArithmeticOp - var ok bool - if newOperator, ok = thisLevel[next.Type]; !ok { - break - } - - // Are we extending an expression started on - // the previous iteration? 
- if operator != ast.ArithmeticOpInvalid { - lhs = &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - } - } - - operator = newOperator - p.peeker.Read() // eat operator token - rhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - } - - if operator != ast.ArithmeticOpInvalid { - return &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - }, nil - } else { - return lhs, nil - } -} - -func (p *parser) ParseExpressionTerm() (ast.Node, error) { - - next := p.peeker.Peek() - - switch next.Type { - - case scanner.OPAREN: - p.peeker.Read() - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CPAREN, `")"`) - return expr, err - - case scanner.OQUOTE: - return p.ParseQuoted() - - case scanner.INTEGER: - tok := p.peeker.Read() - val, err := strconv.Atoi(tok.Content) - if err != nil { - return nil, TokenErrorf(tok, "invalid integer: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeInt, - Posx: tok.Pos, - }, nil - - case scanner.FLOAT: - tok := p.peeker.Read() - val, err := strconv.ParseFloat(tok.Content, 64) - if err != nil { - return nil, TokenErrorf(tok, "invalid float: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeFloat, - Posx: tok.Pos, - }, nil - - case scanner.BOOL: - tok := p.peeker.Read() - // the scanner guarantees that tok.Content is either "true" or "false" - var val bool - if tok.Content[0] == 't' { - val = true - } else { - val = false - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeBool, - Posx: tok.Pos, - }, nil - - case scanner.MINUS: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents negative numbers as - // a binary subtraction of the number from zero. - return &ast.Arithmetic{ - Op: ast.ArithmeticOpSub, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: 0, - Typex: ast.TypeInt, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.BANG: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents binary negation as an equality - // test with "false". - return &ast.Arithmetic{ - Op: ast.ArithmeticOpEqual, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: false, - Typex: ast.TypeBool, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.IDENTIFIER: - return p.ParseScopeInteraction() - - default: - return nil, ExpectationError("expression", next) - } -} - -// ParseScopeInteraction parses the expression types that interact -// with the evaluation scope: variable access, function calls, and -// indexing. -// -// Indexing should actually be a distinct operator in its own right, -// so that e.g. it can be applied to the result of a function call, -// but for now we're preserving the behavior of the older yacc-based -// parser. 
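The control flow of parseBinaryOps above is the classic precedence-climbing scheme: one recursion level per operator group, lowest precedence first, with left-associative accumulation in the loop. The standalone sketch below, which is illustrative only and independent of the hil code, builds a parenthesized string instead of an AST but mirrors that structure:

```go
package main

import (
	"fmt"
	"strings"
)

// levels lists operator groups from lowest to highest precedence,
// mirroring the shape of the binaryOps table earlier in this file.
var levels = [][]string{{"+", "-"}, {"*", "/"}}

type miniParser struct {
	toks []string
	pos  int
}

func (p *miniParser) peek() string {
	if p.pos < len(p.toks) {
		return p.toks[p.pos]
	}
	return ""
}

// parse consumes one expression at the given precedence level,
// recursing into the next level for each operand.
func (p *miniParser) parse(level int) string {
	if level == len(levels) {
		// Out of operator groups: a "term" is just a bare token here.
		t := p.toks[p.pos]
		p.pos++
		return t
	}
	lhs := p.parse(level + 1)
	for contains(levels[level], p.peek()) {
		op := p.peek()
		p.pos++
		rhs := p.parse(level + 1)
		lhs = "(" + lhs + op + rhs + ")" // left-associative grouping
	}
	return lhs
}

func contains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	p := &miniParser{toks: strings.Split("1 + 2 * 3 - 4", " ")}
	fmt.Println(p.parse(0)) // ((1+(2*3))-4)
}
```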
-func (p *parser) ParseScopeInteraction() (ast.Node, error) {
-	first := p.peeker.Read()
-	startPos := first.Pos
-	if first.Type != scanner.IDENTIFIER {
-		return nil, ExpectationError("identifier", first)
-	}
-
-	next := p.peeker.Peek()
-	if next.Type == scanner.OPAREN {
-		// function call
-		funcName := first.Content
-		p.peeker.Read() // eat paren
-		var args []ast.Node
-
-		for {
-			if p.peeker.Peek().Type == scanner.CPAREN {
-				break
-			}
-
-			arg, err := p.ParseExpression()
-			if err != nil {
-				return nil, err
-			}
-
-			args = append(args, arg)
-
-			if p.peeker.Peek().Type == scanner.COMMA {
-				p.peeker.Read() // eat comma
-				continue
-			} else {
-				break
-			}
-		}
-
-		err := p.requireTokenType(scanner.CPAREN, `")"`)
-		if err != nil {
-			return nil, err
-		}
-
-		return &ast.Call{
-			Func: funcName,
-			Args: args,
-			Posx: startPos,
-		}, nil
-	}
-
-	varNode := &ast.VariableAccess{
-		Name: first.Content,
-		Posx: startPos,
-	}
-
-	if p.peeker.Peek().Type == scanner.OBRACKET {
-		// index operator
-		startPos := p.peeker.Read().Pos // eat bracket
-		indexExpr, err := p.ParseExpression()
-		if err != nil {
-			return nil, err
-		}
-		err = p.requireTokenType(scanner.CBRACKET, `"]"`)
-		if err != nil {
-			return nil, err
-		}
-		return &ast.Index{
-			Target: varNode,
-			Key:    indexExpr,
-			Posx:   startPos,
-		}, nil
-	}
-
-	return varNode, nil
-}
-
-// requireTokenType consumes the next token and returns an error if its
-// type does not match the given type. nil is returned if the type matches.
-//
-// This is a helper around peeker.Read() for situations where the parser just
-// wants to assert that a particular token type must be present.
-func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
-	token := p.peeker.Read()
-	if token.Type != wantType {
-		return ExpectationError(wantName, token)
-	}
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
deleted file mode 100644
index 4de3728..0000000
--- a/vendor/github.com/hashicorp/hil/scanner/peeker.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package scanner
-
-// Peeker is a utility that wraps a token channel returned by Scan and
-// provides an interface that allows a caller (e.g. the parser) to
-// work with the token stream in a mode that allows one token of lookahead,
-// and provides utilities for more convenient processing of the stream.
-type Peeker struct {
-	ch     <-chan *Token
-	peeked *Token
-}
-
-func NewPeeker(ch <-chan *Token) *Peeker {
-	return &Peeker{
-		ch: ch,
-	}
-}
-
-// Peek returns the next token in the stream without consuming it. A
-// subsequent call to Read will return the same token.
-func (p *Peeker) Peek() *Token {
-	if p.peeked == nil {
-		p.peeked = <-p.ch
-	}
-	return p.peeked
-}
-
-// Read consumes the next token in the stream and returns it.
-func (p *Peeker) Read() *Token {
-	token := p.Peek()
-
-	// As a special case, we will produce the EOF token forever once
-	// it is reached.
-	if token.Type != EOF {
-		p.peeked = nil
-	}
-
-	return token
-}
-
-// Close ensures that the token stream has been exhausted, to prevent
-// the goroutine in the underlying scanner from leaking.
-//
-// It's not necessary to call this if the caller reads the token stream
-// to EOF, since that implicitly closes the scanner.
-func (p *Peeker) Close() {
-	for _ = range p.ch {
-		// discard
-	}
-	// Install a synthetic EOF token in 'peeked' in case someone
-	// erroneously calls Peek() or Read() after we've closed.
-	p.peeked = &Token{
-		Type:    EOF,
-		Content: "",
-	}
-}
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
deleted file mode 100644
index 86085de..0000000
--- a/vendor/github.com/hashicorp/hil/scanner/scanner.go
+++ /dev/null
@@ -1,556 +0,0 @@
-package scanner
-
-import (
-	"unicode"
-	"unicode/utf8"
-
-	"github.com/hashicorp/hil/ast"
-)
-
-// Scan returns a channel that receives Tokens from the given input string.
-//
-// The scanner's job is just to partition the string into meaningful parts.
-// It doesn't do any transformation of the raw input string, so the caller
-// must deal with any further interpretation required, such as parsing INTEGER
-// tokens into real ints, or dealing with escape sequences in LITERAL or
-// STRING tokens.
-//
-// Strings in the returned tokens are slices from the original string.
-//
-// startPos should be set to ast.InitPos unless the caller knows that
-// this interpolation string is part of a larger file and knows the position
-// of the first character in that larger file.
-func Scan(s string, startPos ast.Pos) <-chan *Token {
-	ch := make(chan *Token)
-	go scan(s, ch, startPos)
-	return ch
-}
-
-func scan(s string, ch chan<- *Token, pos ast.Pos) {
-	// 'remain' starts off as the whole string but we gradually
-	// slice off the front of it as we work our way through.
-	remain := s
-
-	// nesting keeps track of how many ${ .. } sequences we are
-	// inside, so we can recognize the minor differences in syntax
-	// between outer string literals (LITERAL tokens) and quoted
-	// string literals (STRING tokens).
-	nesting := 0
-
-	// We're going to flip back and forth between parsing literals/strings
-	// and parsing interpolation sequences ${ .. } until we reach EOF or
-	// some INVALID token.
-All:
-	for {
-		startPos := pos
-		// Literal string processing first, since the beginning of
-		// a string is always outside of an interpolation sequence.
-		literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
-
-		if len(literalVal) > 0 {
-			litType := LITERAL
-			if nesting > 0 {
-				litType = STRING
-			}
-			ch <- &Token{
-				Type:    litType,
-				Content: literalVal,
-				Pos:     startPos,
-			}
-			remain = remain[len(literalVal):]
-		}
-
-		ch <- terminator
-		remain = remain[len(terminator.Content):]
-		pos = terminator.Pos
-		// Safe to use len() here because none of the terminator tokens
-		// can contain UTF-8 sequences.
-		pos.Column = pos.Column + len(terminator.Content)
-
-		switch terminator.Type {
-		case INVALID:
-			// Synthetic EOF after invalid token, since further scanning
-			// is likely to just produce more garbage.
-			ch <- &Token{
-				Type:    EOF,
-				Content: "",
-				Pos:     pos,
-			}
-			break All
-		case EOF:
-			// All done!
-			break All
-		case BEGIN:
-			nesting++
-		case CQUOTE:
-			// nothing special to do
-		default:
-			// Should never happen
-			panic("invalid string/literal terminator")
-		}
-
-		// Now we do the processing of the insides of ${ .. } sequences.
-		// This loop terminates when we encounter either a closing } or
-		// an opening ", which will cause us to return to literal processing.
-	Interpolation:
-		for {
-
-			token, size, newPos := scanInterpolationToken(remain, pos)
-			ch <- token
-			remain = remain[size:]
-			pos = newPos
-
-			switch token.Type {
-			case INVALID:
-				// Synthetic EOF after invalid token, since further scanning
-				// is likely to just produce more garbage.
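Scan and the Peeker above are the scanner's entire public surface; the parser consumes them roughly as follows. A sketch only, not part of this patch, assuming the vendored scanner and ast packages:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
	"github.com/hashicorp/hil/scanner"
)

func main() {
	ch := scanner.Scan(`hi ${upper(name)}`, ast.Pos{Line: 1, Column: 1})
	peeker := scanner.NewPeeker(ch)
	defer peeker.Close() // drain the channel so the scanning goroutine exits

	for {
		tok := peeker.Read()
		if tok.Type == scanner.EOF {
			break
		}
		fmt.Printf("%v %q at %d:%d\n",
			tok.Type, tok.Content, tok.Pos.Line, tok.Pos.Column)
	}
}
```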
- ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done - // (though a syntax error that we'll catch in the parser) - break All - case END: - nesting-- - if nesting < 0 { - // Can happen if there are unbalanced ${ and } sequences - // in the input, which we'll catch in the parser. - nesting = 0 - } - break Interpolation - case OQUOTE: - // Beginning of nested quoted string - break Interpolation - } - } - } - - close(ch) -} - -// Returns the token found at the start of the given string, followed by -// the number of bytes that were consumed from the string and the adjusted -// source position. -// -// Note that the number of bytes consumed can be more than the length of -// the returned token contents if the string begins with whitespace, since -// it will be silently consumed before reading the token. -func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { - pos := startPos - size := 0 - - // Consume whitespace, if any - for len(s) > 0 && byteIsSpace(s[0]) { - if s[0] == '\n' { - pos.Column = 1 - pos.Line++ - } else { - pos.Column++ - } - size++ - s = s[1:] - } - - // Unexpected EOF during sequence - if len(s) == 0 { - return &Token{ - Type: EOF, - Content: "", - Pos: pos, - }, size, pos - } - - next := s[0] - var token *Token - - switch next { - case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': - // Easy punctuation symbols that don't have any special meaning - // during scanning, and that stand for themselves in the - // TokenType enumeration. - token = &Token{ - Type: TokenType(next), - Content: s[:1], - Pos: pos, - } - case '}': - token = &Token{ - Type: END, - Content: s[:1], - Pos: pos, - } - case '"': - token = &Token{ - Type: OQUOTE, - Content: s[:1], - Pos: pos, - } - case '!': - if len(s) >= 2 && s[:2] == "!=" { - token = &Token{ - Type: NOTEQUAL, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: BANG, - Content: s[:1], - Pos: pos, - } - } - case '<': - if len(s) >= 2 && s[:2] == "<=" { - token = &Token{ - Type: LTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: LT, - Content: s[:1], - Pos: pos, - } - } - case '>': - if len(s) >= 2 && s[:2] == ">=" { - token = &Token{ - Type: GTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: GT, - Content: s[:1], - Pos: pos, - } - } - case '=': - if len(s) >= 2 && s[:2] == "==" { - token = &Token{ - Type: EQUAL, - Content: s[:2], - Pos: pos, - } - } else { - // A single equals is not a valid operator - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '&': - if len(s) >= 2 && s[:2] == "&&" { - token = &Token{ - Type: AND, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '|': - if len(s) >= 2 && s[:2] == "||" { - token = &Token{ - Type: OR, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - default: - if next >= '0' && next <= '9' { - num, numType := scanNumber(s) - token = &Token{ - Type: numType, - Content: num, - Pos: pos, - } - } else if stringStartsWithIdentifier(s) { - ident, runeLen := scanIdentifier(s) - tokenType := IDENTIFIER - if ident == "true" || ident == "false" { - tokenType = BOOL - } - token = &Token{ - Type: tokenType, - Content: ident, - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. 
- pos.Column = pos.Column + runeLen - return token, size + len(ident), pos - } else { - _, byteLen := utf8.DecodeRuneInString(s) - token = &Token{ - Type: INVALID, - Content: s[:byteLen], - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. - pos.Column = pos.Column + 1 - return token, size + byteLen, pos - } - } - - // Here we assume that the token content contains no UTF-8 sequences, - // because we dealt with UTF-8 characters as a special case where - // necessary above. - size = size + len(token.Content) - pos.Column = pos.Column + len(token.Content) - - return token, size, pos -} - -// Returns the (possibly-empty) prefix of the given string that represents -// a literal, followed by the token that marks the end of the literal. -func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { - litLen := 0 - pos := startPos - var terminator *Token - for { - - if litLen >= len(s) { - if nested { - // We've ended in the middle of a quoted string, - // which means this token is actually invalid. - return "", &Token{ - Type: INVALID, - Content: s, - Pos: startPos, - } - } - terminator = &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break - } - - next := s[litLen] - - if next == '$' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '{' { - terminator = &Token{ - Type: BEGIN, - Content: s[litLen : litLen+2], - Pos: pos, - } - pos.Column = pos.Column + 2 - break - } else if follow == '$' { - // Double-$ escapes the special processing of $, - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - - // special handling that applies only to quoted strings - if nested { - if next == '"' { - terminator = &Token{ - Type: CQUOTE, - Content: s[litLen : litLen+1], - Pos: pos, - } - pos.Column = pos.Column + 1 - break - } - - // Escaped quote marks do not terminate the string. - // - // All we do here in the scanner is avoid terminating a string - // due to an escaped quote. The parser is responsible for the - // full handling of escape sequences, since it's able to produce - // better error messages than we can produce in here. - if next == '\\' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '"' { - // \" escapes the special processing of ", - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } else if follow == '\\' { - // \\ escapes \ - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - } - - if next == '\n' { - pos.Column = 1 - pos.Line++ - litLen++ - } else { - pos.Column++ - - // "Column" measures runes, so we need to actually consume - // a valid UTF-8 character here. - _, size := utf8.DecodeRuneInString(s[litLen:]) - litLen = litLen + size - } - - } - - return s[:litLen], terminator -} - -// scanNumber returns the extent of the prefix of the string that represents -// a valid number, along with what type of number it represents: INT or FLOAT. -// -// scanNumber does only basic character analysis: numbers consist of digits -// and periods, with at least one period signalling a FLOAT. It's the parser's -// responsibility to validate the form and range of the number, such as ensuring -// that a FLOAT actually contains only one period, etc. 
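One consequence of scanNumber's trailing-period handling (in the body that follows) is easy to miss: a digit run that ends in a period scans as an INTEGER followed by a PERIOD token rather than a malformed FLOAT. A quick illustration through the scanner, not part of this patch, assuming the vendored packages:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
	"github.com/hashicorp/hil/scanner"
)

func main() {
	// "42." scans as INTEGER "42" then PERIOD ".", while "4.2"
	// would scan as a single FLOAT token.
	for tok := range scanner.Scan(`${42.}`, ast.Pos{Line: 1, Column: 1}) {
		fmt.Printf("%v %q\n", tok.Type, tok.Content)
	}
}
```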
-func scanNumber(s string) (string, TokenType) { - period := -1 - byteLen := 0 - numType := INTEGER - for { - if byteLen >= len(s) { - break - } - - next := s[byteLen] - if next != '.' && (next < '0' || next > '9') { - // If our last value was a period, then we're not a float, - // we're just an integer that ends in a period. - if period == byteLen-1 { - byteLen-- - numType = INTEGER - } - - break - } - - if next == '.' { - // If we've already seen a period, break out - if period >= 0 { - break - } - - period = byteLen - numType = FLOAT - } - - byteLen++ - } - - return s[:byteLen], numType -} - -// scanIdentifier returns the extent of the prefix of the string that -// represents a valid identifier, along with the length of that prefix -// in runes. -// -// Identifiers may contain utf8-encoded non-Latin letters, which will -// cause the returned "rune length" to be shorter than the byte length -// of the returned string. -func scanIdentifier(s string) (string, int) { - byteLen := 0 - runeLen := 0 - for { - if byteLen >= len(s) { - break - } - - nextRune, size := utf8.DecodeRuneInString(s[byteLen:]) - if !(nextRune == '_' || - nextRune == '-' || - nextRune == '.' || - nextRune == '*' || - unicode.IsNumber(nextRune) || - unicode.IsLetter(nextRune) || - unicode.IsMark(nextRune)) { - break - } - - // If we reach a star, it must be between periods to be part - // of the same identifier. - if nextRune == '*' && s[byteLen-1] != '.' { - break - } - - // If our previous character was a star, then the current must - // be period. Otherwise, undo that and exit. - if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' { - byteLen-- - if s[byteLen-1] == '.' { - byteLen-- - } - - break - } - - byteLen = byteLen + size - runeLen = runeLen + 1 - } - - return s[:byteLen], runeLen -} - -// byteIsSpace implements a restrictive interpretation of spaces that includes -// only what's valid inside interpolation sequences: spaces, tabs, newlines. -func byteIsSpace(b byte) bool { - switch b { - case ' ', '\t', '\r', '\n': - return true - default: - return false - } -} - -// stringStartsWithIdentifier returns true if the given string begins with -// a character that is a legal start of an identifier: an underscore or -// any character that Unicode considers to be a letter. -func stringStartsWithIdentifier(s string) bool { - if len(s) == 0 { - return false - } - - first := s[0] - - // Easy ASCII cases first - if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' { - return true - } - - // If our first byte begins a UTF-8 sequence then the sequence might - // be a unicode letter. - if utf8.RuneStart(first) { - firstRune, _ := utf8.DecodeRuneInString(s) - if unicode.IsLetter(firstRune) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go deleted file mode 100644 index b6c82ae..0000000 --- a/vendor/github.com/hashicorp/hil/scanner/token.go +++ /dev/null @@ -1,105 +0,0 @@ -package scanner - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -type Token struct { - Type TokenType - Content string - Pos ast.Pos -} - -//go:generate stringer -type=TokenType -type TokenType rune - -const ( - // Raw string data outside of ${ .. } sequences - LITERAL TokenType = 'o' - - // STRING is like a LITERAL but it's inside a quoted string - // within a ${ ... } sequence, and so it can contain backslash - // escaping. 
- STRING TokenType = 'S' - - // Other Literals - INTEGER TokenType = 'I' - FLOAT TokenType = 'F' - BOOL TokenType = 'B' - - BEGIN TokenType = '$' // actually "${" - END TokenType = '}' - OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence - CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence - OPAREN TokenType = '(' - CPAREN TokenType = ')' - OBRACKET TokenType = '[' - CBRACKET TokenType = ']' - COMMA TokenType = ',' - - IDENTIFIER TokenType = 'i' - - PERIOD TokenType = '.' - PLUS TokenType = '+' - MINUS TokenType = '-' - STAR TokenType = '*' - SLASH TokenType = '/' - PERCENT TokenType = '%' - - AND TokenType = '∧' - OR TokenType = '∨' - BANG TokenType = '!' - - EQUAL TokenType = '=' - NOTEQUAL TokenType = '≠' - GT TokenType = '>' - LT TokenType = '<' - GTE TokenType = '≥' - LTE TokenType = '≤' - - QUESTION TokenType = '?' - COLON TokenType = ':' - - EOF TokenType = '␄' - - // Produced for sequences that cannot be understood as valid tokens - // e.g. due to use of unrecognized punctuation. - INVALID TokenType = '�' -) - -func (t *Token) String() string { - switch t.Type { - case EOF: - return "end of string" - case INVALID: - return fmt.Sprintf("invalid sequence %q", t.Content) - case INTEGER: - return fmt.Sprintf("integer %s", t.Content) - case FLOAT: - return fmt.Sprintf("float %s", t.Content) - case STRING: - return fmt.Sprintf("string %q", t.Content) - case LITERAL: - return fmt.Sprintf("literal %q", t.Content) - case OQUOTE: - return fmt.Sprintf("opening quote") - case CQUOTE: - return fmt.Sprintf("closing quote") - case AND: - return "&&" - case OR: - return "||" - case NOTEQUAL: - return "!=" - case GTE: - return ">=" - case LTE: - return "<=" - default: - // The remaining token types have content that - // speaks for itself. 
- return fmt.Sprintf("%q", t.Content) - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go deleted file mode 100644 index a602f5f..0000000 --- a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=TokenType"; DO NOT EDIT - -package scanner - -import "fmt" - -const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" - -var _TokenType_map = map[TokenType]string{ - 33: _TokenType_name[0:4], - 36: _TokenType_name[4:9], - 37: _TokenType_name[9:16], - 40: _TokenType_name[16:22], - 41: _TokenType_name[22:28], - 42: _TokenType_name[28:32], - 43: _TokenType_name[32:36], - 44: _TokenType_name[36:41], - 45: _TokenType_name[41:46], - 46: _TokenType_name[46:52], - 47: _TokenType_name[52:57], - 58: _TokenType_name[57:62], - 60: _TokenType_name[62:64], - 61: _TokenType_name[64:69], - 62: _TokenType_name[69:71], - 63: _TokenType_name[71:79], - 66: _TokenType_name[79:83], - 70: _TokenType_name[83:88], - 73: _TokenType_name[88:95], - 83: _TokenType_name[95:101], - 91: _TokenType_name[101:109], - 93: _TokenType_name[109:117], - 105: _TokenType_name[117:127], - 111: _TokenType_name[127:134], - 125: _TokenType_name[134:137], - 8220: _TokenType_name[137:143], - 8221: _TokenType_name[143:149], - 8743: _TokenType_name[149:152], - 8744: _TokenType_name[152:154], - 8800: _TokenType_name[154:162], - 8804: _TokenType_name[162:165], - 8805: _TokenType_name[165:168], - 9220: _TokenType_name[168:171], - 65533: _TokenType_name[171:178], -} - -func (i TokenType) String() string { - if str, ok := _TokenType_map[i]; ok { - return str - } - return fmt.Sprintf("TokenType(%d)", i) -} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go deleted file mode 100644 index e69df29..0000000 --- a/vendor/github.com/hashicorp/hil/transform_fixed.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" -) - -// FixedValueTransform transforms an AST to return a fixed value for -// all interpolations. i.e. you can make "hi ${anything}" always -// turn into "hi foo". -// -// The primary use case for this is for config validations where you can -// verify that interpolations result in a certain type of string. -func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { - // We visit the nodes in top-down order - result := root - switch n := result.(type) { - case *ast.Output: - for i, v := range n.Exprs { - n.Exprs[i] = FixedValueTransform(v, Value) - } - case *ast.LiteralNode: - // We keep it as-is - default: - // Anything else we replace - result = Value - } - - return result -} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go deleted file mode 100644 index 0ace830..0000000 --- a/vendor/github.com/hashicorp/hil/walk.go +++ /dev/null @@ -1,266 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/reflectwalk" -) - -// WalkFn is the type of function to pass to Walk. Modify fields within -// WalkData to control whether replacement happens. -type WalkFn func(*WalkData) error - -// WalkData is the structure passed to the callback of the Walk function. 
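Before the field-by-field details below, a minimal sketch of the replace flow this walk API supports. Illustration only, not part of this patch; it assumes the vendored hil package and the reflectwalk traversal it builds on:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	config := map[string]interface{}{
		"greeting": "hi ${name}",
	}

	err := hil.Walk(config, func(d *hil.WalkData) error {
		// d.Root is the parsed HIL AST for the string being visited.
		// Setting Replace substitutes ReplaceValue into the structure.
		d.Replace = true
		d.ReplaceValue = "hi world"
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(config["greeting"]) // hi world
}
```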
-//
-// This structure contains data passed in as well as fields that are expected
-// to be written by the caller as a result. Please see the documentation for
-// each field for more information.
-type WalkData struct {
-	// Root is the parsed root of this HIL program
-	Root ast.Node
-
-	// Location is the location within the structure where this
-	// value was found. This can be used to modify behavior within
-	// slices and so on.
-	Location reflectwalk.Location
-
-	// The below two values must be set by the callback to have any effect.
-	//
-	// Replace, if true, will replace the value in the structure with
-	// ReplaceValue. It is up to the caller to make sure this is a string.
-	Replace      bool
-	ReplaceValue string
-}
-
-// Walk will walk an arbitrary Go structure and parse any string as an
-// HIL program and call the callback cb to determine what to replace it
-// with.
-//
-// This function is very useful for arbitrary HIL program interpolation
-// across a complex configuration structure. Due to the heavy use of
-// reflection in this function, it is recommended to write many unit tests
-// with your typical configuration structures to help mitigate the risk
-// of panics.
-func Walk(v interface{}, cb WalkFn) error {
-	walker := &interpolationWalker{F: cb}
-	return reflectwalk.Walk(v, walker)
-}
-
-// interpolationWalker implements interfaces for the reflectwalk package
-// (github.com/mitchellh/reflectwalk) that can be used to automatically
-// execute a callback for an interpolation.
-type interpolationWalker struct {
-	F WalkFn
-
-	key         []string
-	lastValue   reflect.Value
-	loc         reflectwalk.Location
-	cs          []reflect.Value
-	csKey       []reflect.Value
-	csData      interface{}
-	sliceIndex  int
-	unknownKeys []string
-}
-
-func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
-	w.loc = loc
-	return nil
-}
-
-func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
-	w.loc = reflectwalk.None
-
-	switch loc {
-	case reflectwalk.Map:
-		w.cs = w.cs[:len(w.cs)-1]
-	case reflectwalk.MapValue:
-		w.key = w.key[:len(w.key)-1]
-		w.csKey = w.csKey[:len(w.csKey)-1]
-	case reflectwalk.Slice:
-		// Split any values that need to be split
-		w.splitSlice()
-		w.cs = w.cs[:len(w.cs)-1]
-	case reflectwalk.SliceElem:
-		w.csKey = w.csKey[:len(w.csKey)-1]
-	}
-
-	return nil
-}
-
-func (w *interpolationWalker) Map(m reflect.Value) error {
-	w.cs = append(w.cs, m)
-	return nil
-}
-
-func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
-	w.csData = k
-	w.csKey = append(w.csKey, k)
-	w.key = append(w.key, k.String())
-	w.lastValue = v
-	return nil
-}
-
-func (w *interpolationWalker) Slice(s reflect.Value) error {
-	w.cs = append(w.cs, s)
-	return nil
-}
-
-func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
-	w.csKey = append(w.csKey, reflect.ValueOf(i))
-	w.sliceIndex = i
-	return nil
-}
-
-func (w *interpolationWalker) Primitive(v reflect.Value) error {
-	setV := v
-
-	// We only care about strings
-	if v.Kind() == reflect.Interface {
-		setV = v
-		v = v.Elem()
-	}
-	if v.Kind() != reflect.String {
-		return nil
-	}
-
-	astRoot, err := Parse(v.String())
-	if err != nil {
-		return err
-	}
-
-	// If the AST we got is just a literal string value with the same
-	// value then we ignore it. We have to check if it's the same value
-	// because it is possible to input a string, get out a string, and
-	// have it be different.
For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.F == nil { - return nil - } - - data := WalkData{Root: astRoot, Location: w.loc} - if err := w.F(&data); err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if data.Replace { - /* - if remove { - w.removeCurrent() - return nil - } - */ - - resultVal := reflect.ValueOf(data.ReplaceValue) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } - } - - panic("No container found for removeCurrent") -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func (w *interpolationWalker) splitSlice() { - // Get the []interface{} slice so we can do some operations on - // it without dealing with reflection. We'll document each step - // here to be clear. - var s []interface{} - raw := w.cs[len(w.cs)-1] - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - default: - panic("Unknown kind: " + raw.Kind().String()) - } - - // Check if we have any elements that we need to split. If not, then - // just return since we're done. - split := false - if !split { - return - } - - // Make a new result slice that is twice the capacity to fit our growth. - result := make([]interface{}, 0, len(s)*2) - - // Go over each element of the original slice and start building up - // the resulting slice by splitting where we have to. - for _, v := range s { - sv, ok := v.(string) - if !ok { - // Not a string, so just set it - result = append(result, v) - continue - } - - // Not a string list, so just set it - result = append(result, sv) - } - - // Our slice is now done, we have to replace the slice now - // with this new one that we have. - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/logutils/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
“Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md deleted file mode 100644 index 49490ea..0000000 --- a/vendor/github.com/hashicorp/logutils/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# logutils - -logutils is a Go package that augments the standard library "log" package -to make logging a bit more modern, without fragmenting the Go ecosystem -with new logging packages. - -## The simplest thing that could possibly work - -Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following: - -```go -package main - -import ( - "log" - "os" - - "github.com/hashicorp/logutils" -) - -func main() { - filter := &logutils.LevelFilter{ - Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"}, - MinLevel: logutils.LogLevel("WARN"), - Writer: os.Stderr, - } - log.SetOutput(filter) - - log.Print("[DEBUG] Debugging") // this will not print - log.Print("[WARN] Warning") // this will - log.Print("[ERROR] Erring") // and so will this - log.Print("Message I haven't updated") // and so will this -} -``` - -This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before. diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go deleted file mode 100644 index 6381bf1..0000000 --- a/vendor/github.com/hashicorp/logutils/level.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package logutils augments the standard log package with levels. -package logutils - -import ( - "bytes" - "io" - "sync" -) - -type LogLevel string - -// LevelFilter is an io.Writer that can be used with a logger that -// will filter out log messages that aren't at least a certain level. -// -// Once the filter is in use somewhere, it is not safe to modify -// the structure. -type LevelFilter struct { - // Levels is the list of log levels, in increasing order of - // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. 
-	Levels []LogLevel
-
-	// MinLevel is the minimum level allowed through
-	MinLevel LogLevel
-
-	// The underlying io.Writer where log messages that pass the filter
-	// will be written.
-	Writer io.Writer
-
-	badLevels map[LogLevel]struct{}
-	once      sync.Once
-}
-
-// Check reports whether a given line would be included by the level
-// filter.
-func (f *LevelFilter) Check(line []byte) bool {
-	f.once.Do(f.init)
-
-	// Check for a log level
-	var level LogLevel
-	x := bytes.IndexByte(line, '[')
-	if x >= 0 {
-		y := bytes.IndexByte(line[x:], ']')
-		if y >= 0 {
-			level = LogLevel(line[x+1 : x+y])
-		}
-	}
-
-	_, ok := f.badLevels[level]
-	return !ok
-}
-
-func (f *LevelFilter) Write(p []byte) (n int, err error) {
-	// Note in general that io.Writer can receive any byte sequence
-	// to write, but the "log" package always guarantees that we only
-	// get a single line. We use that as a slight optimization within
-	// this method, assuming we're dealing with a single, complete line
-	// of log data.
-
-	if !f.Check(p) {
-		return len(p), nil
-	}
-
-	return f.Writer.Write(p)
-}
-
-// SetMinLevel is used to update the minimum log level
-func (f *LevelFilter) SetMinLevel(min LogLevel) {
-	f.MinLevel = min
-	f.init()
-}
-
-func (f *LevelFilter) init() {
-	badLevels := make(map[LogLevel]struct{})
-	for _, level := range f.Levels {
-		if level == f.MinLevel {
-			break
-		}
-		badLevels[level] = struct{}{}
-	}
-	f.badLevels = badLevels
-}
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
deleted file mode 100644
index c33dcc7..0000000
--- a/vendor/github.com/hashicorp/terraform/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
-   means each individual or legal entity that creates, contributes to the
-   creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
-   means the combination of the Contributions of others (if any) used by a
-   Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
-   means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
-   means Source Code Form to which the initial Contributor has attached the
-   notice in Exhibit A, the Executable Form of such Source Code Form, and
-   Modifications of such Source Code Form, in each case including portions
-   thereof.
-
-1.5. “Incompatible With Secondary Licenses”
-   means
-
-   a. that the initial Contributor has attached the notice described in
-      Exhibit B to the Covered Software; or
-
-   b. that the Covered Software was made available under the terms of version
-      1.1 or earlier of the License, but not also under the terms of a
-      Secondary License.
-
-1.6. “Executable Form”
-
-   means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
-   means a work that combines Covered Software with other material, in a separate
-   file or files, that is not Covered Software.
-
-1.8. “License”
-
-   means this document.
-
-1.9. “Licensable”
-
-   means having the right to grant, to the maximum extent possible, whether at the
-   time of the initial grant or subsequently, any and all of the rights conveyed by
-   this License.
-
-1.10. “Modifications”
-
-   means any of the following:
-
-   a. any file in Source Code Form that results from an addition to, deletion
-      from, or modification of the contents of Covered Software; or
-
-   b. any new file in Source Code Form that contains any Covered Software.
-
-1.11.
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go deleted file mode 100644 index 5f4e89e..0000000 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ /dev/null @@ -1,86 +0,0 @@ -package config - -// Append appends one configuration to another. -// -// Append assumes that both configurations will not have -// conflicting variables, resources, etc. If they do, the -// problems will be caught in the validation phase. -// -// It is possible that c1, c2 on their own are not valid. For -// example, a resource in c2 may reference a variable in c1. But -// together, they would be valid. -func Append(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Append unknown keys, but keep them unique since it is a set - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // merge Terraform blocks - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - if len(c1.Modules) > 0 || len(c2.Modules) > 0 { - c.Modules = make( - []*Module, 0, len(c1.Modules)+len(c2.Modules)) - c.Modules = append(c.Modules, c1.Modules...) - c.Modules = append(c.Modules, c2.Modules...) - } - - if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 { - c.Outputs = make( - []*Output, 0, len(c1.Outputs)+len(c2.Outputs)) - c.Outputs = append(c.Outputs, c1.Outputs...) - c.Outputs = append(c.Outputs, c2.Outputs...) - } - - if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 { - c.ProviderConfigs = make( - []*ProviderConfig, - 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs)) - c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...) - c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...) - } - - if len(c1.Resources) > 0 || len(c2.Resources) > 0 { - c.Resources = make( - []*Resource, - 0, len(c1.Resources)+len(c2.Resources)) - c.Resources = append(c.Resources, c1.Resources...) - c.Resources = append(c.Resources, c2.Resources...) - } - - if len(c1.Variables) > 0 || len(c2.Variables) > 0 { - c.Variables = make( - []*Variable, 0, len(c1.Variables)+len(c2.Variables)) - c.Variables = append(c.Variables, c1.Variables...) - c.Variables = append(c.Variables, c2.Variables...) - } - - return c, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go deleted file mode 100644 index 3f756dc..0000000 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ /dev/null @@ -1,1074 +0,0 @@ -// The config package is responsible for loading and validating the -// configuration. 
-package config - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/helper/hilmapstructure" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/mitchellh/reflectwalk" -) - -// NameRegexp is the regular expression that all names (modules, providers, -// resources, etc.) must follow. -var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) - -// Config is the configuration that comes from loading a collection -// of Terraform templates. -type Config struct { - // Dir is the path to the directory where this configuration was - // loaded from. If it is blank, this configuration wasn't loaded from - // any meaningful directory. - Dir string - - Terraform *Terraform - Atlas *AtlasConfig - Modules []*Module - ProviderConfigs []*ProviderConfig - Resources []*Resource - Variables []*Variable - Outputs []*Output - - // The fields below can be filled in by loaders for validation - // purposes. - unknownKeys []string -} - -// AtlasConfig is the configuration for building in HashiCorp's Atlas. -type AtlasConfig struct { - Name string - Include []string - Exclude []string -} - -// Module is a module used within a configuration. -// -// This does not represent a module itself, this represents a module -// call-site within an existing configuration. -type Module struct { - Name string - Source string - RawConfig *RawConfig -} - -// ProviderConfig is the configuration for a resource provider. -// -// For example, Terraform needs to set the AWS access keys for the AWS -// resource provider. -type ProviderConfig struct { - Name string - Alias string - Version string - RawConfig *RawConfig -} - -// A resource represents a single Terraform resource in the configuration. -// A Terraform resource is something that supports some or all of the -// usual "create, read, update, delete" operations, depending on -// the given Mode. -type Resource struct { - Mode ResourceMode // which operations the resource supports - Name string - Type string - RawCount *RawConfig - RawConfig *RawConfig - Provisioners []*Provisioner - Provider string - DependsOn []string - Lifecycle ResourceLifecycle -} - -// Copy returns a copy of this Resource. Helpful for avoiding shared -// config pointers across multiple pieces of the graph that need to do -// interpolation. 
-func (r *Resource) Copy() *Resource { - n := &Resource{ - Mode: r.Mode, - Name: r.Name, - Type: r.Type, - RawCount: r.RawCount.Copy(), - RawConfig: r.RawConfig.Copy(), - Provisioners: make([]*Provisioner, 0, len(r.Provisioners)), - Provider: r.Provider, - DependsOn: make([]string, len(r.DependsOn)), - Lifecycle: *r.Lifecycle.Copy(), - } - for _, p := range r.Provisioners { - n.Provisioners = append(n.Provisioners, p.Copy()) - } - copy(n.DependsOn, r.DependsOn) - return n -} - -// ResourceLifecycle is used to store the lifecycle tuning parameters -// to allow customized behavior -type ResourceLifecycle struct { - CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` - PreventDestroy bool `mapstructure:"prevent_destroy"` - IgnoreChanges []string `mapstructure:"ignore_changes"` -} - -// Copy returns a copy of this ResourceLifecycle -func (r *ResourceLifecycle) Copy() *ResourceLifecycle { - n := &ResourceLifecycle{ - CreateBeforeDestroy: r.CreateBeforeDestroy, - PreventDestroy: r.PreventDestroy, - IgnoreChanges: make([]string, len(r.IgnoreChanges)), - } - copy(n.IgnoreChanges, r.IgnoreChanges) - return n -} - -// Provisioner is a configured provisioner step on a resource. -type Provisioner struct { - Type string - RawConfig *RawConfig - ConnInfo *RawConfig - - When ProvisionerWhen - OnFailure ProvisionerOnFailure -} - -// Copy returns a copy of this Provisioner -func (p *Provisioner) Copy() *Provisioner { - return &Provisioner{ - Type: p.Type, - RawConfig: p.RawConfig.Copy(), - ConnInfo: p.ConnInfo.Copy(), - When: p.When, - OnFailure: p.OnFailure, - } -} - -// Variable is a variable defined within the configuration. -type Variable struct { - Name string - DeclaredType string `mapstructure:"type"` - Default interface{} - Description string -} - -// Output is an output defined within the configuration. An output is -// resulting data that is highlighted by Terraform when finished. An -// output marked Sensitive will be output in a masked form following -// application, but will still be available in state. -type Output struct { - Name string - DependsOn []string - Description string - Sensitive bool - RawConfig *RawConfig -} - -// VariableType is the type of value a variable is holding, and returned -// by the Type() function on variables. -type VariableType byte - -const ( - VariableTypeUnknown VariableType = iota - VariableTypeString - VariableTypeList - VariableTypeMap -) - -func (v VariableType) Printable() string { - switch v { - case VariableTypeString: - return "string" - case VariableTypeMap: - return "map" - case VariableTypeList: - return "list" - default: - return "unknown" - } -} - -// ProviderConfigName returns the name of the provider configuration in -// the given mapping that maps to the proper provider configuration -// for this resource. -func ProviderConfigName(t string, pcs []*ProviderConfig) string { - lk := "" - for _, v := range pcs { - k := v.Name - if strings.HasPrefix(t, k) && len(k) > len(lk) { - lk = k - } - } - - return lk -} - -// A unique identifier for this module. -func (r *Module) Id() string { - return fmt.Sprintf("%s", r.Name) -} - -// Count returns the count of this resource. -func (r *Resource) Count() (int, error) { - raw := r.RawCount.Value() - count, ok := r.RawCount.Value().(string) - if !ok { - return 0, fmt.Errorf( - "expected count to be a string or int, got %T", raw) - } - - v, err := strconv.ParseInt(count, 0, 0) - if err != nil { - return 0, err - } - - return int(v), nil -} - -// A unique identifier for this resource. 
-func (r *Resource) Id() string {
-	switch r.Mode {
-	case ManagedResourceMode:
-		return fmt.Sprintf("%s.%s", r.Type, r.Name)
-	case DataResourceMode:
-		return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
-	default:
-		panic(fmt.Errorf("unknown resource mode %s", r.Mode))
-	}
-}
-
-// ProviderFullName returns the full name of the provider for this resource,
-// which may either be specified explicitly using the "provider" meta-argument
-// or implied by the prefix on the resource type name.
-func (r *Resource) ProviderFullName() string {
-	return ResourceProviderFullName(r.Type, r.Provider)
-}
-
-// ResourceProviderFullName returns the full (dependable) name of the
-// provider for a hypothetical resource with the given resource type and
-// explicit provider string. If the explicit provider string is empty then
-// the provider name is inferred from the resource type name.
-func ResourceProviderFullName(resourceType, explicitProvider string) string {
-	if explicitProvider != "" {
-		return explicitProvider
-	}
-
-	idx := strings.IndexRune(resourceType, '_')
-	if idx == -1 {
-		// If no underscores, the resource name is assumed to be
-		// also the provider name, e.g. if the provider exposes
-		// only a single resource of each type.
-		return resourceType
-	}
-
-	return resourceType[:idx]
-}
-
-// Validate does some basic semantic checking of the configuration.
-func (c *Config) Validate() error {
-	if c == nil {
-		return nil
-	}
-
-	var errs []error
-
-	for _, k := range c.unknownKeys {
-		errs = append(errs, fmt.Errorf(
-			"Unknown root level key: %s", k))
-	}
-
-	// Validate the Terraform config
-	if tf := c.Terraform; tf != nil {
-		errs = append(errs, c.Terraform.Validate()...)
-	}
-
-	vars := c.InterpolatedVariables()
-	varMap := make(map[string]*Variable)
-	for _, v := range c.Variables {
-		if _, ok := varMap[v.Name]; ok {
-			errs = append(errs, fmt.Errorf(
-				"Variable '%s': duplicate found. Variable names must be unique.",
-				v.Name))
-		}
-
-		varMap[v.Name] = v
-	}
-
-	for k, _ := range varMap {
-		if !NameRegexp.MatchString(k) {
-			errs = append(errs, fmt.Errorf(
-				"variable %q: variable name must match regular expression %s",
-				k, NameRegexp))
-		}
-	}
-
-	for _, v := range c.Variables {
-		if v.Type() == VariableTypeUnknown {
-			errs = append(errs, fmt.Errorf(
-				"Variable '%s': must be a string or a map",
-				v.Name))
-			continue
-		}
-
-		interp := false
-		fn := func(n ast.Node) (interface{}, error) {
-			// LiteralNode is a literal string (outside of a ${ ... } sequence).
-			// interpolationWalker skips most of these, but in particular it
-			// visits those that have escaped sequences (like $${foo}) as a
-			// signal that *some* processing is required on this string. For
-			// our purposes here though, this is fine and not an interpolation.
-			if _, ok := n.(*ast.LiteralNode); !ok {
-				interp = true
-			}
-			return "", nil
-		}
-
-		w := &interpolationWalker{F: fn}
-		if v.Default != nil {
-			if err := reflectwalk.Walk(v.Default, w); err == nil {
-				if interp {
-					errs = append(errs, fmt.Errorf(
-						"Variable '%s': cannot contain interpolations",
-						v.Name))
-				}
-			}
-		}
-	}
-
-	// Check for references to user variables that do not actually
-	// exist and record those errors.
-	for source, vs := range vars {
-		for _, v := range vs {
-			uv, ok := v.(*UserVariable)
-			if !ok {
-				continue
-			}
-
-			if _, ok := varMap[uv.Name]; !ok {
-				errs = append(errs, fmt.Errorf(
-					"%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
-					source,
-					uv.Name))
-			}
-		}
-	}
-
-	// Check that all count variables are valid.
- for source, vs := range vars { - for _, rawV := range vs { - switch v := rawV.(type) { - case *CountVariable: - if v.Type == CountValueInvalid { - errs = append(errs, fmt.Errorf( - "%s: invalid count variable: %s", - source, - v.FullKey())) - } - case *PathVariable: - if v.Type == PathValueInvalid { - errs = append(errs, fmt.Errorf( - "%s: invalid path variable: %s", - source, - v.FullKey())) - } - } - } - } - - // Check that providers aren't declared multiple times and that their - // version constraints, where present, are syntactically valid. - providerSet := make(map[string]struct{}) - for _, p := range c.ProviderConfigs { - name := p.FullName() - if _, ok := providerSet[name]; ok { - errs = append(errs, fmt.Errorf( - "provider.%s: declared multiple times, you can only declare a provider once", - name)) - continue - } - - if p.Version != "" { - _, err := discovery.ConstraintStr(p.Version).Parse() - if err != nil { - errs = append(errs, fmt.Errorf( - "provider.%s: invalid version constraint %q: %s", - name, p.Version, err, - )) - } - } - - providerSet[name] = struct{}{} - } - - // Check that all references to modules are valid - modules := make(map[string]*Module) - dupped := make(map[string]struct{}) - for _, m := range c.Modules { - // Check for duplicates - if _, ok := modules[m.Id()]; ok { - if _, ok := dupped[m.Id()]; !ok { - dupped[m.Id()] = struct{}{} - - errs = append(errs, fmt.Errorf( - "%s: module repeated multiple times", - m.Id())) - } - - // Already seen this module, just skip it - continue - } - - modules[m.Id()] = m - - // Check that the source has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": m.Source, - }) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: module source error: %s", - m.Id(), err)) - } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: module source cannot contain interpolations", - m.Id())) - } - - // Check that the name matches our regexp - if !NameRegexp.Match([]byte(m.Name)) { - errs = append(errs, fmt.Errorf( - "%s: module name can only contain letters, numbers, "+ - "dashes, and underscores", - m.Id())) - } - - // Check that the configuration can all be strings, lists or maps - raw := make(map[string]interface{}) - for k, v := range m.RawConfig.Raw { - var strVal string - if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { - raw[k] = strVal - continue - } - - var mapVal map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil { - raw[k] = mapVal - continue - } - - var sliceVal []interface{} - if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil { - raw[k] = sliceVal - continue - } - - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string, list or map value", - m.Id(), k)) - } - - // Check for invalid count variables - for _, v := range m.RawConfig.Variables { - switch v.(type) { - case *CountVariable: - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", m.Name)) - case *SelfVariable: - errs = append(errs, fmt.Errorf( - "%s: self variables are only valid within resources", m.Name)) - } - } - - // Update the raw configuration to only contain the string values - m.RawConfig, err = NewRawConfig(raw) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: can't initialize configuration: %s", - m.Id(), err)) - } - } - dupped = nil - - // Check that all variables for modules reference modules that - // exist. 
-	for source, vs := range vars {
-		for _, v := range vs {
-			mv, ok := v.(*ModuleVariable)
-			if !ok {
-				continue
-			}
-
-			if _, ok := modules[mv.Name]; !ok {
-				errs = append(errs, fmt.Errorf(
-					"%s: unknown module referenced: %s",
-					source,
-					mv.Name))
-			}
-		}
-	}
-
-	// Check that all references to resources are valid
-	resources := make(map[string]*Resource)
-	dupped = make(map[string]struct{})
-	for _, r := range c.Resources {
-		if _, ok := resources[r.Id()]; ok {
-			if _, ok := dupped[r.Id()]; !ok {
-				dupped[r.Id()] = struct{}{}
-
-				errs = append(errs, fmt.Errorf(
-					"%s: resource repeated multiple times",
-					r.Id()))
-			}
-		}
-
-		resources[r.Id()] = r
-	}
-	dupped = nil
-
-	// Validate resources
-	for n, r := range resources {
-		// Verify count variables
-		for _, v := range r.RawCount.Variables {
-			switch v.(type) {
-			case *CountVariable:
-				errs = append(errs, fmt.Errorf(
-					"%s: resource count can't reference count variable: %s",
-					n,
-					v.FullKey()))
-			case *SimpleVariable:
-				errs = append(errs, fmt.Errorf(
-					"%s: resource count can't reference variable: %s",
-					n,
-					v.FullKey()))
-
-			// Good
-			case *ModuleVariable:
-			case *ResourceVariable:
-			case *TerraformVariable:
-			case *UserVariable:
-
-			default:
-				errs = append(errs, fmt.Errorf(
-					"Internal error. Unknown type in count var in %s: %T",
-					n, v))
-			}
-		}
-
-		// Interpolate with a fixed number to verify that it's a number.
-		r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
-			// Execute the node but transform the AST so that it returns
-			// a fixed value of "5" for all interpolations.
-			result, err := hil.Eval(
-				hil.FixedValueTransform(
-					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
-				nil)
-			if err != nil {
-				return "", err
-			}
-
-			return result.Value, nil
-		})
-		_, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
-		if err != nil {
-			errs = append(errs, fmt.Errorf(
-				"%s: resource count must be an integer",
-				n))
-		}
-		r.RawCount.init()
-
-		// Validate DependsOn
-		errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
-
-		// Verify provisioners
-		for _, p := range r.Provisioners {
-			// This validation checks that there are no splat variables
-			// referencing the resource itself. This currently is not allowed.
-
-			for _, v := range p.ConnInfo.Variables {
-				rv, ok := v.(*ResourceVariable)
-				if !ok {
-					continue
-				}
-
-				if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
-					errs = append(errs, fmt.Errorf(
-						"%s: connection info cannot contain splat variable "+
-							"referencing itself", n))
-					break
-				}
-			}
-
-			for _, v := range p.RawConfig.Variables {
-				rv, ok := v.(*ResourceVariable)
-				if !ok {
-					continue
-				}
-
-				if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
-					errs = append(errs, fmt.Errorf(
-						"%s: connection info cannot contain splat variable "+
-							"referencing itself", n))
-					break
-				}
-			}
-
-			// Check for invalid when/onFailure values; though this should be
-			// picked up by the loader, we check here just in case.
- if p.When == ProvisionerWhenInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'when' value is invalid", n)) - } - if p.OnFailure == ProvisionerOnFailureInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n)) - } - } - - // Verify ignore_changes contains valid entries - for _, v := range r.Lifecycle.IgnoreChanges { - if strings.Contains(v, "*") && v != "*" { - errs = append(errs, fmt.Errorf( - "%s: ignore_changes does not support using a partial string "+ - "together with a wildcard: %s", n, v)) - } - } - - // Verify ignore_changes has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": r.Lifecycle.IgnoreChanges, - }) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: lifecycle ignore_changes error: %s", - n, err)) - } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: lifecycle ignore_changes cannot contain interpolations", - n)) - } - - // If it is a data source then it can't have provisioners - if r.Mode == DataResourceMode { - if _, ok := r.RawConfig.Raw["provisioner"]; ok { - errs = append(errs, fmt.Errorf( - "%s: data sources cannot have provisioners", - n)) - } - } - } - - for source, vs := range vars { - for _, v := range vs { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - id := rv.ResourceId() - if _, ok := resources[id]; !ok { - errs = append(errs, fmt.Errorf( - "%s: unknown resource '%s' referenced in variable %s", - source, - id, - rv.FullKey())) - continue - } - } - } - - // Check that all outputs are valid - { - found := make(map[string]struct{}) - for _, o := range c.Outputs { - // Verify the output is new - if _, ok := found[o.Name]; ok { - errs = append(errs, fmt.Errorf( - "%s: duplicate output. output names must be unique.", - o.Name)) - continue - } - found[o.Name] = struct{}{} - - var invalidKeys []string - valueKeyFound := false - for k := range o.RawConfig.Raw { - if k == "value" { - valueKeyFound = true - continue - } - if k == "sensitive" { - if sensitive, ok := o.RawConfig.config[k].(bool); ok { - if sensitive { - o.Sensitive = true - } - continue - } - - errs = append(errs, fmt.Errorf( - "%s: value for 'sensitive' must be boolean", - o.Name)) - continue - } - if k == "description" { - if desc, ok := o.RawConfig.config[k].(string); ok { - o.Description = desc - continue - } - - errs = append(errs, fmt.Errorf( - "%s: value for 'description' must be string", - o.Name)) - continue - } - invalidKeys = append(invalidKeys, k) - } - if len(invalidKeys) > 0 { - errs = append(errs, fmt.Errorf( - "%s: output has invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "))) - } - if !valueKeyFound { - errs = append(errs, fmt.Errorf( - "%s: output is missing required 'value' key", o.Name)) - } - - for _, v := range o.RawConfig.Variables { - if _, ok := v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", o.Name)) - } - } - } - } - - // Validate the self variable - for source, rc := range c.rawConfigs() { - // Ignore provisioners. This is a pretty brittle way to do this, - // but better than also repeating all the resources. 
- if strings.Contains(source, "provision") { - continue - } - - for _, v := range rc.Variables { - if _, ok := v.(*SelfVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: cannot contain self-reference %s", source, v.FullKey())) - } - } - } - - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil -} - -// InterpolatedVariables is a helper that returns a mapping of all the interpolated -// variables within the configuration. This is used to verify references -// are valid in the Validate step. -func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable { - result := make(map[string][]InterpolatedVariable) - for source, rc := range c.rawConfigs() { - for _, v := range rc.Variables { - result[source] = append(result[source], v) - } - } - return result -} - -// rawConfigs returns all of the RawConfigs that are available keyed by -// a human-friendly source. -func (c *Config) rawConfigs() map[string]*RawConfig { - result := make(map[string]*RawConfig) - for _, m := range c.Modules { - source := fmt.Sprintf("module '%s'", m.Name) - result[source] = m.RawConfig - } - - for _, pc := range c.ProviderConfigs { - source := fmt.Sprintf("provider config '%s'", pc.Name) - result[source] = pc.RawConfig - } - - for _, rc := range c.Resources { - source := fmt.Sprintf("resource '%s'", rc.Id()) - result[source+" count"] = rc.RawCount - result[source+" config"] = rc.RawConfig - - for i, p := range rc.Provisioners { - subsource := fmt.Sprintf( - "%s provisioner %s (#%d)", - source, p.Type, i+1) - result[subsource] = p.RawConfig - } - } - - for _, o := range c.Outputs { - source := fmt.Sprintf("output '%s'", o.Name) - result[source] = o.RawConfig - } - - return result -} - -func (c *Config) validateDependsOn( - n string, - v []string, - resources map[string]*Resource, - modules map[string]*Module) []error { - // Verify depends on points to resources that all exist - var errs []error - for _, d := range v { - // Check if we contain interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "value": d, - }) - if err == nil && len(rc.Variables) > 0 { - errs = append(errs, fmt.Errorf( - "%s: depends on value cannot contain interpolations: %s", - n, d)) - continue - } - - // If it is a module, verify it is a module - if strings.HasPrefix(d, "module.") { - name := d[len("module."):] - if _, ok := modules[name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent module '%s'", - n, name)) - } - - continue - } - - // Check resources - if _, ok := resources[d]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent resource '%s'", - n, d)) - } - } - - return errs -} - -func (m *Module) mergerName() string { - return m.Id() -} - -func (m *Module) mergerMerge(other merger) merger { - m2 := other.(*Module) - - result := *m - result.Name = m2.Name - result.RawConfig = result.RawConfig.merge(m2.RawConfig) - - if m2.Source != "" { - result.Source = m2.Source - } - - return &result -} - -func (o *Output) mergerName() string { - return o.Name -} - -func (o *Output) mergerMerge(m merger) merger { - o2 := m.(*Output) - - result := *o - result.Name = o2.Name - result.Description = o2.Description - result.RawConfig = result.RawConfig.merge(o2.RawConfig) - result.Sensitive = o2.Sensitive - result.DependsOn = o2.DependsOn - - return &result -} - -func (c *ProviderConfig) GoString() string { - return fmt.Sprintf("*%#v", *c) -} - -func (c *ProviderConfig) FullName() string { - if c.Alias == "" { - return c.Name - } - - return 
fmt.Sprintf("%s.%s", c.Name, c.Alias) -} - -func (c *ProviderConfig) mergerName() string { - return c.Name -} - -func (c *ProviderConfig) mergerMerge(m merger) merger { - c2 := m.(*ProviderConfig) - - result := *c - result.Name = c2.Name - result.RawConfig = result.RawConfig.merge(c2.RawConfig) - - if c2.Alias != "" { - result.Alias = c2.Alias - } - - return &result -} - -func (r *Resource) mergerName() string { - return r.Id() -} - -func (r *Resource) mergerMerge(m merger) merger { - r2 := m.(*Resource) - - result := *r - result.Mode = r2.Mode - result.Name = r2.Name - result.Type = r2.Type - result.RawConfig = result.RawConfig.merge(r2.RawConfig) - - if r2.RawCount.Value() != "1" { - result.RawCount = r2.RawCount - } - - if len(r2.Provisioners) > 0 { - result.Provisioners = r2.Provisioners - } - - return &result -} - -// Merge merges two variables to create a new third variable. -func (v *Variable) Merge(v2 *Variable) *Variable { - // Shallow copy the variable - result := *v - - // The names should be the same, but the second name always wins. - result.Name = v2.Name - - if v2.DeclaredType != "" { - result.DeclaredType = v2.DeclaredType - } - if v2.Default != nil { - result.Default = v2.Default - } - if v2.Description != "" { - result.Description = v2.Description - } - - return &result -} - -var typeStringMap = map[string]VariableType{ - "string": VariableTypeString, - "map": VariableTypeMap, - "list": VariableTypeList, -} - -// Type returns the type of variable this is. -func (v *Variable) Type() VariableType { - if v.DeclaredType != "" { - declaredType, ok := typeStringMap[v.DeclaredType] - if !ok { - return VariableTypeUnknown - } - - return declaredType - } - - return v.inferTypeFromDefault() -} - -// ValidateTypeAndDefault ensures that default variable value is compatible -// with the declared type (if one exists), and that the type is one which is -// known to Terraform -func (v *Variable) ValidateTypeAndDefault() error { - // If an explicit type is declared, ensure it is valid - if v.DeclaredType != "" { - if _, ok := typeStringMap[v.DeclaredType]; !ok { - validTypes := []string{} - for k := range typeStringMap { - validTypes = append(validTypes, k) - } - return fmt.Errorf( - "Variable '%s' type must be one of [%s] - '%s' is not a valid type", - v.Name, - strings.Join(validTypes, ", "), - v.DeclaredType, - ) - } - } - - if v.DeclaredType == "" || v.Default == nil { - return nil - } - - if v.inferTypeFromDefault() != v.Type() { - return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')", - v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable()) - } - - return nil -} - -func (v *Variable) mergerName() string { - return v.Name -} - -func (v *Variable) mergerMerge(m merger) merger { - return v.Merge(m.(*Variable)) -} - -// Required tests whether a variable is required or not. 
-func (v *Variable) Required() bool { - return v.Default == nil -} - -// inferTypeFromDefault contains the logic for the old method of inferring -// variable types - we can also use this for validating that the declared -// type matches the type of the default value -func (v *Variable) inferTypeFromDefault() VariableType { - if v.Default == nil { - return VariableTypeString - } - - var s string - if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil { - v.Default = s - return VariableTypeString - } - - var m map[string]interface{} - if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil { - v.Default = m - return VariableTypeMap - } - - var l []interface{} - if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil { - v.Default = l - return VariableTypeList - } - - return VariableTypeUnknown -} - -func (m ResourceMode) Taintable() bool { - switch m { - case ManagedResourceMode: - return true - case DataResourceMode: - return false - default: - panic(fmt.Errorf("unsupported ResourceMode value %s", m)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go deleted file mode 100644 index 0b3abbc..0000000 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ /dev/null @@ -1,338 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// TestString is a Stringer-like function that outputs a string that can -// be used to easily compare multiple Config structures in unit tests. -// -// This function has no practical use outside of unit tests and debugging. -func (c *Config) TestString() string { - if c == nil { - return "" - } - - var buf bytes.Buffer - if len(c.Modules) > 0 { - buf.WriteString("Modules:\n\n") - buf.WriteString(modulesStr(c.Modules)) - buf.WriteString("\n\n") - } - - if len(c.Variables) > 0 { - buf.WriteString("Variables:\n\n") - buf.WriteString(variablesStr(c.Variables)) - buf.WriteString("\n\n") - } - - if len(c.ProviderConfigs) > 0 { - buf.WriteString("Provider Configs:\n\n") - buf.WriteString(providerConfigsStr(c.ProviderConfigs)) - buf.WriteString("\n\n") - } - - if len(c.Resources) > 0 { - buf.WriteString("Resources:\n\n") - buf.WriteString(resourcesStr(c.Resources)) - buf.WriteString("\n\n") - } - - if len(c.Outputs) > 0 { - buf.WriteString("Outputs:\n\n") - buf.WriteString(outputsStr(c.Outputs)) - buf.WriteString("\n") - } - - return strings.TrimSpace(buf.String()) -} - -func terraformStr(t *Terraform) string { - result := "" - - if b := t.Backend; b != nil { - result += fmt.Sprintf("backend (%s)\n", b.Type) - - keys := make([]string, 0, len(b.RawConfig.Raw)) - for k, _ := range b.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - } - - return strings.TrimSpace(result) -} - -func modulesStr(ms []*Module) string { - result := "" - order := make([]int, 0, len(ms)) - ks := make([]string, 0, len(ms)) - mapping := make(map[string]int) - for i, m := range ms { - k := m.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - m := ms[i] - result += fmt.Sprintf("%s\n", m.Id()) - - ks := make([]string, 0, len(m.RawConfig.Raw)) - for k, _ := range m.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - result += fmt.Sprintf(" source = %s\n", m.Source) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - - 
return strings.TrimSpace(result) -} - -func outputsStr(os []*Output) string { - ns := make([]string, 0, len(os)) - m := make(map[string]*Output) - for _, o := range os { - ns = append(ns, o.Name) - m[o.Name] = o - } - sort.Strings(ns) - - result := "" - for _, n := range ns { - o := m[n] - - result += fmt.Sprintf("%s\n", n) - - if len(o.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range o.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(o.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range o.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a provider configs field into a deterministic -// string value for comparison in tests. -func providerConfigsStr(pcs []*ProviderConfig) string { - result := "" - - ns := make([]string, 0, len(pcs)) - m := make(map[string]*ProviderConfig) - for _, n := range pcs { - ns = append(ns, n.Name) - m[n.Name] = n - } - sort.Strings(ns) - - for _, n := range ns { - pc := m[n] - - result += fmt.Sprintf("%s\n", n) - - keys := make([]string, 0, len(pc.RawConfig.Raw)) - for k, _ := range pc.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - - if len(pc.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range pc.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a resources field into a deterministic -// string value for comparison in tests. 
-func resourcesStr(rs []*Resource) string { - result := "" - order := make([]int, 0, len(rs)) - ks := make([]string, 0, len(rs)) - mapping := make(map[string]int) - for i, r := range rs { - k := r.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - r := rs[i] - result += fmt.Sprintf( - "%s (x%s)\n", - r.Id(), - r.RawCount.Value()) - - ks := make([]string, 0, len(r.RawConfig.Raw)) - for k, _ := range r.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - - if len(r.Provisioners) > 0 { - result += fmt.Sprintf(" provisioners\n") - for _, p := range r.Provisioners { - when := "" - if p.When != ProvisionerWhenCreate { - when = fmt.Sprintf(" (%s)", p.When.String()) - } - - result += fmt.Sprintf(" %s%s\n", p.Type, when) - - if p.OnFailure != ProvisionerOnFailureFail { - result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) - } - - ks := make([]string, 0, len(p.RawConfig.Raw)) - for k, _ := range p.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - } - - if len(r.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range r.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(r.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - - ks := make([]string, 0, len(r.RawConfig.Variables)) - for k, _ := range r.RawConfig.Variables { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - rawV := r.RawConfig.Variables[k] - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a variables field into a deterministic -// string value for comparison in tests. -func variablesStr(vs []*Variable) string { - result := "" - ks := make([]string, 0, len(vs)) - m := make(map[string]*Variable) - for _, v := range vs { - ks = append(ks, v.Name) - m[v.Name] = v - } - sort.Strings(ks) - - for _, k := range ks { - v := m[k] - - required := "" - if v.Required() { - required = " (required)" - } - - declaredType := "" - if v.DeclaredType != "" { - declaredType = fmt.Sprintf(" (%s)", v.DeclaredType) - } - - if v.Default == nil || v.Default == "" { - v.Default = "<>" - } - if v.Description == "" { - v.Description = "<>" - } - - result += fmt.Sprintf( - "%s%s%s\n %v\n %s\n", - k, - required, - declaredType, - v.Default, - v.Description) - } - - return strings.TrimSpace(result) -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go deleted file mode 100644 index 8535c96..0000000 --- a/vendor/github.com/hashicorp/terraform/config/config_terraform.go +++ /dev/null @@ -1,117 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-version" - "github.com/mitchellh/hashstructure" -) - -// Terraform is the Terraform meta-configuration that can be present -// in configuration files for configuring Terraform itself. -type Terraform struct { - RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint) - Backend *Backend // See Backend struct docs -} - -// Validate performs the validation for just the Terraform configuration. 
-func (t *Terraform) Validate() []error { - var errs []error - - if raw := t.RequiredVersion; raw != "" { - // Check that the value has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": raw, - }) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: %s", err)) - } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "terraform.required_version: cannot contain interpolations")) - } else { - // Check it is valid - _, err := version.NewConstraint(raw) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: invalid syntax: %s", err)) - } - } - } - - if t.Backend != nil { - errs = append(errs, t.Backend.Validate()...) - } - - return errs -} - -// Merge t with t2. -// Any conflicting fields are overwritten by t2. -func (t *Terraform) Merge(t2 *Terraform) { - if t2.RequiredVersion != "" { - t.RequiredVersion = t2.RequiredVersion - } - - if t2.Backend != nil { - t.Backend = t2.Backend - } -} - -// Backend is the configuration for the "backend" to use with Terraform. -// A backend is responsible for all major behavior of Terraform's core. -// The abstraction layer above the core (the "backend") allows for behavior -// such as remote operation. -type Backend struct { - Type string - RawConfig *RawConfig - - // Hash is a unique hash code representing the original configuration - // of the backend. This won't be recomputed unless Rehash is called. - Hash uint64 -} - -// Rehash returns a unique content hash for this backend's configuration -// as a uint64 value. -func (b *Backend) Rehash() uint64 { - // If we have no backend, the value is zero - if b == nil { - return 0 - } - - // Use hashstructure to hash only our type with the config. - code, err := hashstructure.Hash(map[string]interface{}{ - "type": b.Type, - "config": b.RawConfig.Raw, - }, nil) - - // This should never happen since we have just some basic primitives - // so panic if there is an error. - if err != nil { - panic(err) - } - - return code -} - -func (b *Backend) Validate() []error { - if len(b.RawConfig.Interpolations) > 0 { - return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))} - } - - return nil -} - -const errBackendInterpolations = ` -terraform.backend: configuration cannot contain interpolations - -The backend configuration is loaded by Terraform extremely early, before -the core of Terraform can be initialized. This is necessary because the backend -dictates the behavior of that core. The core is what handles interpolation -processing. Because of this, interpolations cannot be used in backend -configuration. - -If you'd like to parameterize backend configuration, we recommend using -partial configuration with the "-backend-config" flag to "terraform init". -` diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go deleted file mode 100644 index 08dc0fe..0000000 --- a/vendor/github.com/hashicorp/terraform/config/config_tree.go +++ /dev/null @@ -1,43 +0,0 @@ -package config - -// configTree represents a tree of configurations where the root is the -// first file and its children are the configurations it has imported. -type configTree struct { - Path string - Config *Config - Children []*configTree -} - -// Flatten flattens the entire tree down to a single merged Config -// structure. -func (t *configTree) Flatten() (*Config, error) { - // No children is easy: we're already merged! 
-    if len(t.Children) == 0 {
-        return t.Config, nil
-    }
-
-    // Depth-first, merge all the children first.
-    childConfigs := make([]*Config, len(t.Children))
-    for i, ct := range t.Children {
-        c, err := ct.Flatten()
-        if err != nil {
-            return nil, err
-        }
-
-        childConfigs[i] = c
-    }
-
-    // Merge all the children in order
-    config := childConfigs[0]
-    childConfigs = childConfigs[1:]
-    for _, config2 := range childConfigs {
-        var err error
-        config, err = Merge(config, config2)
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    // Merge the final merged child config with our own
-    return Merge(config, t.Config)
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
deleted file mode 100644
index 37ec11a..0000000
--- a/vendor/github.com/hashicorp/terraform/config/import_tree.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package config
-
-import (
-    "fmt"
-    "io"
-)
-
-// configurable is an interface that must be implemented by any configuration
-// formats of Terraform in order to return a *Config.
-type configurable interface {
-    Config() (*Config, error)
-}
-
-// importTree is the result of the first-pass load of the configuration
-// files. It is a tree of raw configurables and then any children (their
-// imports).
-//
-// An importTree can be turned into a configTree.
-type importTree struct {
-    Path     string
-    Raw      configurable
-    Children []*importTree
-}
-
-// This is the function type that must be implemented by the configuration
-// file loader to turn a single file into a configurable and any additional
-// imports.
-type fileLoaderFunc func(path string) (configurable, []string, error)
-
-// loadTree takes a single file and loads the entire importTree for that
-// file. This function detects what kind of configuration file it is and
-// executes the proper fileLoaderFunc.
-func loadTree(root string) (*importTree, error) {
-    var f fileLoaderFunc
-    switch ext(root) {
-    case ".tf", ".tf.json":
-        f = loadFileHcl
-    default:
-    }
-
-    if f == nil {
-        return nil, fmt.Errorf(
-            "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
-            root)
-    }
-
-    c, imps, err := f(root)
-    if err != nil {
-        return nil, err
-    }
-
-    children := make([]*importTree, len(imps))
-    for i, imp := range imps {
-        t, err := loadTree(imp)
-        if err != nil {
-            return nil, err
-        }
-
-        children[i] = t
-    }
-
-    return &importTree{
-        Path:     root,
-        Raw:      c,
-        Children: children,
-    }, nil
-}
-
-// Close releases any resources we might be holding open for the importTree.
-//
-// This can safely be called even while ConfigTree results are alive. The
-// importTree is not bound to these.
-func (t *importTree) Close() error {
-    if c, ok := t.Raw.(io.Closer); ok {
-        c.Close()
-    }
-    for _, ct := range t.Children {
-        ct.Close()
-    }
-
-    return nil
-}
-
-// ConfigTree traverses the importTree and turns each node into a *Config
-// object, ultimately returning a *configTree.
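Before the implementation, a sketch of how the passes chain together. This is hypothetical package-internal usage of the unexported helpers above (loadTree, Close, ConfigTree, Flatten), not an exported API:

func loadAndFlatten(path string) (*Config, error) {
    t, err := loadTree(path) // first pass: raw configurables plus imports
    if err != nil {
        return nil, err
    }
    defer t.Close() // release any file handles held by the raw configurables

    ct, err := t.ConfigTree() // second pass: parse each node into a *Config
    if err != nil {
        return nil, err
    }
    return ct.Flatten() // depth-first merge into a single *Config
}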
-func (t *importTree) ConfigTree() (*configTree, error) { - config, err := t.Raw.Config() - if err != nil { - return nil, fmt.Errorf( - "Error loading %s: %s", - t.Path, - err) - } - - // Build our result - result := &configTree{ - Path: t.Path, - Config: config, - } - - // Build the config trees for the children - result.Children = make([]*configTree, len(t.Children)) - for i, ct := range t.Children { - t, err := ct.ConfigTree() - if err != nil { - return nil, err - } - - result.Children[i] = t - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go deleted file mode 100644 index bbb3555..0000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate.go +++ /dev/null @@ -1,386 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/hil/ast" -) - -// An InterpolatedVariable is a variable reference within an interpolation. -// -// Implementations of this interface represents various sources where -// variables can come from: user variables, resources, etc. -type InterpolatedVariable interface { - FullKey() string -} - -// CountVariable is a variable for referencing information about -// the count. -type CountVariable struct { - Type CountValueType - key string -} - -// CountValueType is the type of the count variable that is referenced. -type CountValueType byte - -const ( - CountValueInvalid CountValueType = iota - CountValueIndex -) - -// A ModuleVariable is a variable that is referencing the output -// of a module, such as "${module.foo.bar}" -type ModuleVariable struct { - Name string - Field string - key string -} - -// A PathVariable is a variable that references path information about the -// module. -type PathVariable struct { - Type PathValueType - key string -} - -type PathValueType byte - -const ( - PathValueInvalid PathValueType = iota - PathValueCwd - PathValueModule - PathValueRoot -) - -// A ResourceVariable is a variable that is referencing the field -// of a resource, such as "${aws_instance.foo.ami}" -type ResourceVariable struct { - Mode ResourceMode - Type string // Resource type, i.e. "aws_instance" - Name string // Resource name - Field string // Resource field - - Multi bool // True if multi-variable: aws_instance.foo.*.id - Index int // Index for multi-variable: aws_instance.foo.1.id == 1 - - key string -} - -// SelfVariable is a variable that is referencing the same resource -// it is running on: "${self.address}" -type SelfVariable struct { - Field string - - key string -} - -// SimpleVariable is an unprefixed variable, which can show up when users have -// strings they are passing down to resources that use interpolation -// internally. The template_file resource is an example of this. -type SimpleVariable struct { - Key string -} - -// TerraformVariable is a "terraform."-prefixed variable used to access -// metadata about the Terraform run. -type TerraformVariable struct { - Field string - key string -} - -// A UserVariable is a variable that is referencing a user variable -// that is inputted from outside the configuration. 
This looks like -// "${var.foo}" -type UserVariable struct { - Name string - Elem string - - key string -} - -func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { - if strings.HasPrefix(v, "count.") { - return NewCountVariable(v) - } else if strings.HasPrefix(v, "path.") { - return NewPathVariable(v) - } else if strings.HasPrefix(v, "self.") { - return NewSelfVariable(v) - } else if strings.HasPrefix(v, "terraform.") { - return NewTerraformVariable(v) - } else if strings.HasPrefix(v, "var.") { - return NewUserVariable(v) - } else if strings.HasPrefix(v, "module.") { - return NewModuleVariable(v) - } else if !strings.ContainsRune(v, '.') { - return NewSimpleVariable(v) - } else { - return NewResourceVariable(v) - } -} - -func NewCountVariable(key string) (*CountVariable, error) { - var fieldType CountValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "index": - fieldType = CountValueIndex - } - - return &CountVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (c *CountVariable) FullKey() string { - return c.key -} - -func NewModuleVariable(key string) (*ModuleVariable, error) { - parts := strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: module variables must be three parts: module.name.attr", - key) - } - - return &ModuleVariable{ - Name: parts[1], - Field: parts[2], - key: key, - }, nil -} - -func (v *ModuleVariable) FullKey() string { - return v.key -} - -func (v *ModuleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewPathVariable(key string) (*PathVariable, error) { - var fieldType PathValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "cwd": - fieldType = PathValueCwd - case "module": - fieldType = PathValueModule - case "root": - fieldType = PathValueRoot - } - - return &PathVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (v *PathVariable) FullKey() string { - return v.key -} - -func NewResourceVariable(key string) (*ResourceVariable, error) { - var mode ResourceMode - var parts []string - if strings.HasPrefix(key, "data.") { - mode = DataResourceMode - parts = strings.SplitN(key, ".", 4) - if len(parts) < 4 { - return nil, fmt.Errorf( - "%s: data variables must be four parts: data.TYPE.NAME.ATTR", - key) - } - - // Don't actually need the "data." prefix for parsing, since it's - // always constant. 
- parts = parts[1:] - } else { - mode = ManagedResourceMode - parts = strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: resource variables must be three parts: TYPE.NAME.ATTR", - key) - } - } - - field := parts[2] - multi := false - var index int - - if idx := strings.Index(field, "."); idx != -1 { - indexStr := field[:idx] - multi = indexStr == "*" - index = -1 - - if !multi { - indexInt, err := strconv.ParseInt(indexStr, 0, 0) - if err == nil { - multi = true - index = int(indexInt) - } - } - - if multi { - field = field[idx+1:] - } - } - - return &ResourceVariable{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Field: field, - Multi: multi, - Index: index, - key: key, - }, nil -} - -func (v *ResourceVariable) ResourceId() string { - switch v.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", v.Type, v.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", v.Type, v.Name) - default: - panic(fmt.Errorf("unknown resource mode %s", v.Mode)) - } -} - -func (v *ResourceVariable) FullKey() string { - return v.key -} - -func NewSelfVariable(key string) (*SelfVariable, error) { - field := key[len("self."):] - - return &SelfVariable{ - Field: field, - - key: key, - }, nil -} - -func (v *SelfVariable) FullKey() string { - return v.key -} - -func (v *SelfVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{key}, nil -} - -func (v *SimpleVariable) FullKey() string { - return v.Key -} - -func (v *SimpleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewTerraformVariable(key string) (*TerraformVariable, error) { - field := key[len("terraform."):] - return &TerraformVariable{ - Field: field, - key: key, - }, nil -} - -func (v *TerraformVariable) FullKey() string { - return v.key -} - -func (v *TerraformVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewUserVariable(key string) (*UserVariable, error) { - name := key[len("var."):] - elem := "" - if idx := strings.Index(name, "."); idx > -1 { - elem = name[idx+1:] - name = name[:idx] - } - - if len(elem) > 0 { - return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem) - } - - return &UserVariable{ - key: key, - - Name: name, - Elem: elem, - }, nil -} - -func (v *UserVariable) FullKey() string { - return v.key -} - -func (v *UserVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -// DetectVariables takes an AST root and returns all the interpolated -// variables that are detected in the AST tree. 
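Since DetectVariables is exported, its behavior is easy to see end to end with hil. A minimal sketch, assuming the pre-0.12 import paths this diff removes:

package main

import (
    "fmt"

    "github.com/hashicorp/hil"
    "github.com/hashicorp/terraform/config"
)

func main() {
    root, err := hil.Parse("${var.region}-${count.index}")
    if err != nil {
        panic(err)
    }
    vars, err := config.DetectVariables(root)
    if err != nil {
        panic(err)
    }
    for _, v := range vars {
        fmt.Printf("%T %s\n", v, v.FullKey())
    }
    // *config.UserVariable var.region
    // *config.CountVariable count.index
}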
-func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { - var result []InterpolatedVariable - var resultErr error - - // Visitor callback - fn := func(n ast.Node) ast.Node { - if resultErr != nil { - return n - } - - switch vn := n.(type) { - case *ast.VariableAccess: - v, err := NewInterpolatedVariable(vn.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - case *ast.Index: - if va, ok := vn.Target.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - if va, ok := vn.Key.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - default: - return n - } - - return n - } - - // Visitor pattern - root.Accept(fn) - - if resultErr != nil { - return nil, resultErr - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go deleted file mode 100644 index a298cf2..0000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ /dev/null @@ -1,1455 +0,0 @@ -package config - -import ( - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "math" - "net" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/go-homedir" - "golang.org/x/crypto/bcrypt" -) - -// stringSliceToVariableValue converts a string slice into the value -// required to be returned from interpolation functions which return -// TypeList. -func stringSliceToVariableValue(values []string) []ast.Variable { - output := make([]ast.Variable, len(values)) - for index, value := range values { - output[index] = ast.Variable{ - Type: ast.TypeString, - Value: value, - } - } - return output -} - -func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { - output := make([]string, len(values)) - for index, value := range values { - if value.Type != ast.TypeString { - return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String()) - } - output[index] = value.Value.(string) - } - return output, nil -} - -// Funcs is the mapping of built-in functions for configuration. 
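Funcs feeds directly into hil evaluation: placing the map in a scope's FuncMap lets hil evaluate Terraform-style expressions. A minimal sketch (no variables are in scope here, so only function calls work; import paths are the pre-0.12 ones):

package main

import (
    "fmt"

    "github.com/hashicorp/hil"
    "github.com/hashicorp/hil/ast"
    "github.com/hashicorp/terraform/config"
)

func main() {
    tree, err := hil.Parse(`${upper(join("-", list("a", "b")))}`)
    if err != nil {
        panic(err)
    }
    result, err := hil.Eval(tree, &hil.EvalConfig{
        GlobalScope: &ast.BasicScope{FuncMap: config.Funcs()},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(result.Value) // A-B
}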
-func Funcs() map[string]ast.Function { - return map[string]ast.Function{ - "basename": interpolationFuncBasename(), - "base64decode": interpolationFuncBase64Decode(), - "base64encode": interpolationFuncBase64Encode(), - "base64sha256": interpolationFuncBase64Sha256(), - "base64sha512": interpolationFuncBase64Sha512(), - "bcrypt": interpolationFuncBcrypt(), - "ceil": interpolationFuncCeil(), - "chomp": interpolationFuncChomp(), - "cidrhost": interpolationFuncCidrHost(), - "cidrnetmask": interpolationFuncCidrNetmask(), - "cidrsubnet": interpolationFuncCidrSubnet(), - "coalesce": interpolationFuncCoalesce(), - "coalescelist": interpolationFuncCoalesceList(), - "compact": interpolationFuncCompact(), - "concat": interpolationFuncConcat(), - "contains": interpolationFuncContains(), - "dirname": interpolationFuncDirname(), - "distinct": interpolationFuncDistinct(), - "element": interpolationFuncElement(), - "file": interpolationFuncFile(), - "matchkeys": interpolationFuncMatchKeys(), - "floor": interpolationFuncFloor(), - "format": interpolationFuncFormat(), - "formatlist": interpolationFuncFormatList(), - "index": interpolationFuncIndex(), - "join": interpolationFuncJoin(), - "jsonencode": interpolationFuncJSONEncode(), - "length": interpolationFuncLength(), - "list": interpolationFuncList(), - "log": interpolationFuncLog(), - "lower": interpolationFuncLower(), - "map": interpolationFuncMap(), - "max": interpolationFuncMax(), - "md5": interpolationFuncMd5(), - "merge": interpolationFuncMerge(), - "min": interpolationFuncMin(), - "pathexpand": interpolationFuncPathExpand(), - "pow": interpolationFuncPow(), - "uuid": interpolationFuncUUID(), - "replace": interpolationFuncReplace(), - "sha1": interpolationFuncSha1(), - "sha256": interpolationFuncSha256(), - "sha512": interpolationFuncSha512(), - "signum": interpolationFuncSignum(), - "slice": interpolationFuncSlice(), - "sort": interpolationFuncSort(), - "split": interpolationFuncSplit(), - "substr": interpolationFuncSubstr(), - "timestamp": interpolationFuncTimestamp(), - "title": interpolationFuncTitle(), - "trimspace": interpolationFuncTrimSpace(), - "upper": interpolationFuncUpper(), - "zipmap": interpolationFuncZipMap(), - } -} - -// interpolationFuncList creates a list from the parameters passed -// to it. -func interpolationFuncList() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{}, - ReturnType: ast.TypeList, - Variadic: true, - VariadicType: ast.TypeAny, - Callback: func(args []interface{}) (interface{}, error) { - var outputList []ast.Variable - - for i, val := range args { - switch v := val.(type) { - case string: - outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v}) - case []ast.Variable: - outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v}) - case map[string]ast.Variable: - outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v}) - default: - return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i) - } - } - - // we don't support heterogeneous types, so make sure all types match the first - if len(outputList) > 0 { - firstType := outputList[0].Type - for i, v := range outputList[1:] { - if v.Type != firstType { - return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1) - } - } - } - - return outputList, nil - }, - } -} - -// interpolationFuncMap creates a map from the parameters passed -// to it. 
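The pairing rule the implementation below enforces, sketched without hil: arguments alternate key, value; the count must be even; keys must be unique. (pairs is an invented helper for illustration; the real function also enforces one value type.)

package main

import "fmt"

func pairs(args ...string) (map[string]string, error) {
    if len(args)%2 != 0 {
        return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
    }
    out := make(map[string]string, len(args)/2)
    for i := 0; i < len(args); i += 2 {
        if _, dup := out[args[i]]; dup {
            return nil, fmt.Errorf("duplicate key: %q", args[i])
        }
        out[args[i]] = args[i+1]
    }
    return out, nil
}

func main() {
    m, _ := pairs("region", "us-east-1", "env", "prod")
    fmt.Println(m["env"]) // prod
}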
-func interpolationFuncMap() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{}, - ReturnType: ast.TypeMap, - Variadic: true, - VariadicType: ast.TypeAny, - Callback: func(args []interface{}) (interface{}, error) { - outputMap := make(map[string]ast.Variable) - - if len(args)%2 != 0 { - return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args)) - } - - var firstType *ast.Type - for i := 0; i < len(args); i += 2 { - key, ok := args[i].(string) - if !ok { - return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1) - } - val := args[i+1] - variable, err := hil.InterfaceToVariable(val) - if err != nil { - return nil, err - } - // Enforce map type homogeneity - if firstType == nil { - firstType = &variable.Type - } else if variable.Type != *firstType { - return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable()) - } - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return outputMap, nil - }, - } -} - -// interpolationFuncCompact strips a list of multi-variable values -// (e.g. as returned by "split") of any empty strings. -func interpolationFuncCompact() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeList}, - ReturnType: ast.TypeList, - Variadic: false, - Callback: func(args []interface{}) (interface{}, error) { - inputList := args[0].([]ast.Variable) - - var outputList []string - for _, val := range inputList { - strVal, ok := val.Value.(string) - if !ok { - return nil, fmt.Errorf( - "compact() may only be used with flat lists, this list contains elements of %s", - val.Type.Printable()) - } - if strVal == "" { - continue - } - - outputList = append(outputList, strVal) - } - return stringSliceToVariableValue(outputList), nil - }, - } -} - -// interpolationFuncCidrHost implements the "cidrhost" function that -// fills in the host part of a CIDR range address to create a single -// host address -func interpolationFuncCidrHost() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ - ast.TypeString, // starting CIDR mask - ast.TypeInt, // host number to insert - }, - ReturnType: ast.TypeString, - Variadic: false, - Callback: func(args []interface{}) (interface{}, error) { - hostNum := args[1].(int) - _, network, err := net.ParseCIDR(args[0].(string)) - if err != nil { - return nil, fmt.Errorf("invalid CIDR expression: %s", err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return nil, err - } - - return ip.String(), nil - }, - } -} - -// interpolationFuncCidrNetmask implements the "cidrnetmask" function -// that returns the subnet mask in IP address notation. -func interpolationFuncCidrNetmask() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ - ast.TypeString, // CIDR mask - }, - ReturnType: ast.TypeString, - Variadic: false, - Callback: func(args []interface{}) (interface{}, error) { - _, network, err := net.ParseCIDR(args[0].(string)) - if err != nil { - return nil, fmt.Errorf("invalid CIDR expression: %s", err) - } - - return net.IP(network.Mask).String(), nil - }, - } -} - -// interpolationFuncCidrSubnet implements the "cidrsubnet" function that -// adds an additional subnet of the given length onto an existing -// IP block expressed in CIDR notation. 
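Before the cidrsubnet implementation, a standalone sketch of what the three CIDR helpers compute, using the vendored go-cidr package directly (the addresses are illustrative):

package main

import (
    "fmt"
    "net"

    "github.com/apparentlymart/go-cidr/cidr"
)

func main() {
    _, network, _ := net.ParseCIDR("10.0.0.0/16")

    // cidrsubnet("10.0.0.0/16", 8, 2) carves out the third /24:
    subnet, _ := cidr.Subnet(network, 8, 2)
    fmt.Println(subnet) // 10.0.2.0/24

    // cidrhost("10.0.0.0/16", 5) picks host number 5:
    host, _ := cidr.Host(network, 5)
    fmt.Println(host) // 10.0.0.5

    // cidrnetmask("10.0.0.0/16") renders the mask in dotted form:
    fmt.Println(net.IP(network.Mask)) // 255.255.0.0
}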
-func interpolationFuncCidrSubnet() ast.Function {
-    return ast.Function{
-        ArgTypes: []ast.Type{
-            ast.TypeString, // starting CIDR mask
-            ast.TypeInt,    // number of bits to extend the prefix
-            ast.TypeInt,    // network number to append to the prefix
-        },
-        ReturnType: ast.TypeString,
-        Variadic:   false,
-        Callback: func(args []interface{}) (interface{}, error) {
-            extraBits := args[1].(int)
-            subnetNum := args[2].(int)
-            _, network, err := net.ParseCIDR(args[0].(string))
-            if err != nil {
-                return nil, fmt.Errorf("invalid CIDR expression: %s", err)
-            }
-
-            // For portability with 32-bit systems where the subnet number
-            // will be a 32-bit int, we only allow extension of 32 bits in
-            // one call even if we're running on a 64-bit machine.
-            // (Of course, this is significant only for IPv6.)
-            if extraBits > 32 {
-                return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
-            }
-
-            newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
-            if err != nil {
-                return nil, err
-            }
-
-            return newNetwork.String(), nil
-        },
-    }
-}
-
-// interpolationFuncCoalesce implements the "coalesce" function that
-// returns the first non-null, non-empty string from the provided input
-func interpolationFuncCoalesce() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeString},
-        ReturnType:   ast.TypeString,
-        Variadic:     true,
-        VariadicType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            if len(args) < 2 {
-                return nil, fmt.Errorf("must provide at least two arguments")
-            }
-            for _, arg := range args {
-                argument := arg.(string)
-
-                if argument != "" {
-                    return argument, nil
-                }
-            }
-            return "", nil
-        },
-    }
-}
-
-// interpolationFuncCoalesceList implements the "coalescelist" function that
-// returns the first non-empty list from the provided input
-func interpolationFuncCoalesceList() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeList},
-        ReturnType:   ast.TypeList,
-        Variadic:     true,
-        VariadicType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            if len(args) < 2 {
-                return nil, fmt.Errorf("must provide at least two arguments")
-            }
-            for _, arg := range args {
-                argument := arg.([]ast.Variable)
-
-                if len(argument) > 0 {
-                    return argument, nil
-                }
-            }
-            return make([]ast.Variable, 0), nil
-        },
-    }
-}
-
-// interpolationFuncContains returns true if an element is in the list,
-// and false otherwise
-func interpolationFuncContains() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
-        ReturnType: ast.TypeBool,
-        Callback: func(args []interface{}) (interface{}, error) {
-            _, err := interpolationFuncIndex().Callback(args)
-            if err != nil {
-                return false, nil
-            }
-            return true, nil
-        },
-    }
-}
-
-// interpolationFuncConcat implements the "concat" function that concatenates
-// multiple lists.
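The concat implementation below flattens its argument lists in order and rejects mixed element types; a dependency-free sketch of the ordering behavior, restricted to string lists (the type check is omitted):

package main

import "fmt"

func concat(lists ...[]string) []string {
    var out []string
    for _, l := range lists {
        out = append(out, l...)
    }
    return out
}

func main() {
    fmt.Println(concat([]string{"a", "b"}, []string{"c"})) // [a b c]
}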
-func interpolationFuncConcat() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeList},
-        ReturnType:   ast.TypeList,
-        Variadic:     true,
-        VariadicType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            var outputList []ast.Variable
-
-            for _, arg := range args {
-                for _, v := range arg.([]ast.Variable) {
-                    switch v.Type {
-                    case ast.TypeString:
-                        outputList = append(outputList, v)
-                    case ast.TypeList:
-                        outputList = append(outputList, v)
-                    case ast.TypeMap:
-                        outputList = append(outputList, v)
-                    default:
-                        return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
-                    }
-                }
-            }
-
-            // we don't support heterogeneous types, so make sure all types match the first
-            if len(outputList) > 0 {
-                firstType := outputList[0].Type
-                for _, v := range outputList[1:] {
-                    if v.Type != firstType {
-                        return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
-                    }
-                }
-            }
-
-            return outputList, nil
-        },
-    }
-}
-
-// interpolationFuncPow returns x raised to the power of y.
-func interpolationFuncPow() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
-        ReturnType: ast.TypeFloat,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return math.Pow(args[0].(float64), args[1].(float64)), nil
-        },
-    }
-}
-
-// interpolationFuncFile implements the "file" function that allows
-// loading contents from a file.
-func interpolationFuncFile() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            path, err := homedir.Expand(args[0].(string))
-            if err != nil {
-                return "", err
-            }
-            data, err := ioutil.ReadFile(path)
-            if err != nil {
-                return "", err
-            }
-
-            return string(data), nil
-        },
-    }
-}
-
-// interpolationFuncFormat implements the "format" function that does
-// string formatting.
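As the implementation below shows, format() defers directly to Go's fmt verbs, so a Terraform format string behaves exactly like fmt.Sprintf with the same arguments:

package main

import "fmt"

func main() {
    // Equivalent of ${format("web-%03d.%s", 7, "example.com")}
    fmt.Println(fmt.Sprintf("web-%03d.%s", 7, "example.com")) // web-007.example.com
}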
-func interpolationFuncFormat() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeString},
-        Variadic:     true,
-        VariadicType: ast.TypeAny,
-        ReturnType:   ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            format := args[0].(string)
-            return fmt.Sprintf(format, args[1:]...), nil
-        },
-    }
-}
-
-// interpolationFuncMax returns the maximum of the numeric arguments
-func interpolationFuncMax() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeFloat},
-        ReturnType:   ast.TypeFloat,
-        Variadic:     true,
-        VariadicType: ast.TypeFloat,
-        Callback: func(args []interface{}) (interface{}, error) {
-            max := args[0].(float64)
-
-            for i := 1; i < len(args); i++ {
-                max = math.Max(max, args[i].(float64))
-            }
-
-            return max, nil
-        },
-    }
-}
-
-// interpolationFuncMin returns the minimum of the numeric arguments
-func interpolationFuncMin() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeFloat},
-        ReturnType:   ast.TypeFloat,
-        Variadic:     true,
-        VariadicType: ast.TypeFloat,
-        Callback: func(args []interface{}) (interface{}, error) {
-            min := args[0].(float64)
-
-            for i := 1; i < len(args); i++ {
-                min = math.Min(min, args[i].(float64))
-            }
-
-            return min, nil
-        },
-    }
-}
-
-// interpolationFuncPathExpand expands a leading `~` to the user's home
-// directory path
-func interpolationFuncPathExpand() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return homedir.Expand(args[0].(string))
-        },
-    }
-}
-
-// interpolationFuncCeil returns the least integer value greater than or equal to the argument
-func interpolationFuncCeil() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeFloat},
-        ReturnType: ast.TypeInt,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return int(math.Ceil(args[0].(float64))), nil
-        },
-    }
-}
-
-// interpolationFuncLog returns the logarithm of the first argument in the
-// base given by the second.
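Concretely, the implementation below computes log(x, b) by change of base as ln(x)/ln(b). A standalone check using only the standard library:

package main

import (
    "fmt"
    "math"
)

func main() {
    logBase := func(x, b float64) float64 { return math.Log(x) / math.Log(b) }
    fmt.Printf("%.1f\n", logBase(256, 2)) // 8.0
}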
-func interpolationFuncLog() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
-        ReturnType: ast.TypeFloat,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
-        },
-    }
-}
-
-// interpolationFuncChomp removes trailing newlines from the given string
-func interpolationFuncChomp() ast.Function {
-    newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return newlines.ReplaceAllString(args[0].(string), ""), nil
-        },
-    }
-}
-
-// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
-func interpolationFuncFloor() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeFloat},
-        ReturnType: ast.TypeInt,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return int(math.Floor(args[0].(float64))), nil
-        },
-    }
-}
-
-func interpolationFuncZipMap() ast.Function {
-    return ast.Function{
-        ArgTypes: []ast.Type{
-            ast.TypeList, // Keys
-            ast.TypeList, // Values
-        },
-        ReturnType: ast.TypeMap,
-        Callback: func(args []interface{}) (interface{}, error) {
-            keys := args[0].([]ast.Variable)
-            values := args[1].([]ast.Variable)
-
-            if len(keys) != len(values) {
-                return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
-                    len(keys), len(values))
-            }
-
-            for i, val := range keys {
-                if val.Type != ast.TypeString {
-                    return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
-                        i, val.Type.Printable())
-                }
-            }
-
-            result := map[string]ast.Variable{}
-            for i := 0; i < len(keys); i++ {
-                result[keys[i].Value.(string)] = values[i]
-            }
-
-            return result, nil
-        },
-    }
-}
-
-// interpolationFuncFormatList implements the "formatlist" function that does
-// string formatting on lists.
-func interpolationFuncFormatList() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeAny},
-        Variadic:     true,
-        VariadicType: ast.TypeAny,
-        ReturnType:   ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            // Make a copy of the variadic part of args
-            // to avoid modifying the original.
-            varargs := make([]interface{}, len(args)-1)
-            copy(varargs, args[1:])
-
-            // Verify we have some arguments
-            if len(varargs) == 0 {
-                return nil, fmt.Errorf("no arguments to formatlist")
-            }
-
-            // Convert arguments that are lists into slices.
-            // Confirm along the way that all lists have the same length (n).
-            var n int
-            listSeen := false
-            for i := 1; i < len(args); i++ {
-                s, ok := args[i].([]ast.Variable)
-                if !ok {
-                    continue
-                }
-
-                // Mark that we've seen at least one list
-                listSeen = true
-
-                // Convert the ast.Variable to a slice of strings
-                parts, err := listVariableValueToStringSlice(s)
-                if err != nil {
-                    return nil, err
-                }
-
-                // otherwise the list is sent down to be indexed
-                varargs[i-1] = parts
-
-                // Check length
-                if n == 0 {
-                    // first list we've seen
-                    n = len(parts)
-                    continue
-                }
-                if n != len(parts) {
-                    return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
-                }
-            }
-
-            // If we didn't see a list this is an error because we
-            // can't determine the return value length.
-            if !listSeen {
-                return nil, fmt.Errorf(
-                    "formatlist requires at least one list argument")
-            }
-
-            // Do the formatting.
-            format := args[0].(string)
-
-            // Generate a list of formatted strings.
-            list := make([]string, n)
-            fmtargs := make([]interface{}, len(varargs))
-            for i := 0; i < n; i++ {
-                for j, arg := range varargs {
-                    switch arg := arg.(type) {
-                    default:
-                        fmtargs[j] = arg
-                    case []string:
-                        fmtargs[j] = arg[i]
-                    }
-                }
-                list[i] = fmt.Sprintf(format, fmtargs...)
-            }
-            return stringSliceToVariableValue(list), nil
-        },
-    }
-}
-
-// interpolationFuncIndex implements the "index" function that allows one to
-// find the index of a specific element in a list
-func interpolationFuncIndex() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
-        ReturnType: ast.TypeInt,
-        Callback: func(args []interface{}) (interface{}, error) {
-            haystack := args[0].([]ast.Variable)
-            needle := args[1].(string)
-            for index, element := range haystack {
-                if needle == element.Value {
-                    return index, nil
-                }
-            }
-            return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
-        },
-    }
-}
-
-// interpolationFuncDirname implements the "dirname" function.
-func interpolationFuncDirname() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return filepath.Dir(args[0].(string)), nil
-        },
-    }
-}
-
-// interpolationFuncDistinct implements the "distinct" function that
-// removes duplicate elements from a list.
-func interpolationFuncDistinct() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeList},
-        ReturnType:   ast.TypeList,
-        Variadic:     true,
-        VariadicType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            var list []string
-
-            if len(args) != 1 {
-                return nil, fmt.Errorf("accepts only one argument.")
-            }
-
-            if argument, ok := args[0].([]ast.Variable); ok {
-                for _, element := range argument {
-                    if element.Type != ast.TypeString {
-                        return nil, fmt.Errorf(
-                            "only works for flat lists, this list contains elements of %s",
-                            element.Type.Printable())
-                    }
-                    list = appendIfMissing(list, element.Value.(string))
-                }
-            }
-
-            return stringSliceToVariableValue(list), nil
-        },
-    }
-}
-
-// helper function to add an element to a list, if it does not already exist
-func appendIfMissing(slice []string, element string) []string {
-    for _, ele := range slice {
-        if ele == element {
-            return slice
-        }
-    }
-    return append(slice, element)
-}
-
-// for two lists `keys` and `values` of equal length, returns all elements
-// from `values` where the corresponding element from `keys` is in `searchset`.
-func interpolationFuncMatchKeys() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
-        ReturnType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            output := make([]ast.Variable, 0)
-
-            values, _ := args[0].([]ast.Variable)
-            keys, _ := args[1].([]ast.Variable)
-            searchset, _ := args[2].([]ast.Variable)
-
-            if len(keys) != len(values) {
-                return nil, fmt.Errorf("length of keys and values should be equal")
-            }
-
-            for i, key := range keys {
-                for _, search := range searchset {
-                    if res, err := compareSimpleVariables(key, search); err != nil {
-                        return nil, err
-                    } else if res {
-                        output = append(output, values[i])
-                        break
-                    }
-                }
-            }
-            // if searchset is empty, then output is an empty list as well.
-            // if we haven't matched any key, then output is an empty list.
-            return output, nil
-        },
-    }
-}
-
-// compare two variables of the same type, i.e.
-// not a complex one such as TypeList or TypeMap
-func compareSimpleVariables(a, b ast.Variable) (bool, error) {
-    if a.Type != b.Type {
-        return false, fmt.Errorf(
-            "won't compare items of different types %s and %s",
-            a.Type.Printable(), b.Type.Printable())
-    }
-    switch a.Type {
-    case ast.TypeString:
-        return a.Value.(string) == b.Value.(string), nil
-    default:
-        return false, fmt.Errorf(
-            "can't compare items of type %s",
-            a.Type.Printable())
-    }
-}
-
-// interpolationFuncJoin implements the "join" function that allows
-// multi-variable values to be joined by some character.
-func interpolationFuncJoin() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeString},
-        Variadic:     true,
-        VariadicType: ast.TypeList,
-        ReturnType:   ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            var list []string
-
-            if len(args) < 2 {
-                return nil, fmt.Errorf("not enough arguments to join()")
-            }
-
-            for _, arg := range args[1:] {
-                for _, part := range arg.([]ast.Variable) {
-                    if part.Type != ast.TypeString {
-                        return nil, fmt.Errorf(
-                            "only works on flat lists, this list contains elements of %s",
-                            part.Type.Printable())
-                    }
-                    list = append(list, part.Value.(string))
-                }
-            }
-
-            return strings.Join(list, args[0].(string)), nil
-        },
-    }
-}
-
-// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
-// a string, list, or map as its JSON representation. For now, values in the
-// list or map may only be strings.
-func interpolationFuncJSONEncode() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeAny},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            var toEncode interface{}
-
-            switch typedArg := args[0].(type) {
-            case string:
-                toEncode = typedArg
-
-            case []ast.Variable:
-                // We preallocate the list here. Note that it's important that in
-                // the length 0 case, we have an empty list rather than nil, as
-                // they encode differently.
-                // XXX It would be nice to support arbitrarily nested data here. Is
-                // there an inverse of hil.InterfaceToVariable?
-                strings := make([]string, len(typedArg))
-
-                for i, v := range typedArg {
-                    if v.Type != ast.TypeString {
-                        return "", fmt.Errorf("list elements must be strings")
-                    }
-                    strings[i] = v.Value.(string)
-                }
-                toEncode = strings
-
-            case map[string]ast.Variable:
-                // XXX It would be nice to support arbitrarily nested data here. Is
-                // there an inverse of hil.InterfaceToVariable?
-                stringMap := make(map[string]string)
-                for k, v := range typedArg {
-                    if v.Type != ast.TypeString {
-                        return "", fmt.Errorf("map values must be strings")
-                    }
-                    stringMap[k] = v.Value.(string)
-                }
-                toEncode = stringMap
-
-            default:
-                return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
-            }
-
-            jEnc, err := json.Marshal(toEncode)
-            if err != nil {
-                return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
-            }
-            return string(jEnc), nil
-        },
-    }
-}
-
-// interpolationFuncReplace implements the "replace" function that does
-// string replacement.
-func interpolationFuncReplace() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            search := args[1].(string)
-            replace := args[2].(string)
-
-            // We search/replace using a regexp if the string is surrounded
-            // in forward slashes.
-            if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
-                re, err := regexp.Compile(search[1 : len(search)-1])
-                if err != nil {
-                    return nil, err
-                }
-
-                return re.ReplaceAllString(s, replace), nil
-            }
-
-            return strings.Replace(s, search, replace, -1), nil
-        },
-    }
-}
-
-func interpolationFuncLength() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeAny},
-        ReturnType: ast.TypeInt,
-        Variadic:   false,
-        Callback: func(args []interface{}) (interface{}, error) {
-            subject := args[0]
-
-            switch typedSubject := subject.(type) {
-            case string:
-                return len(typedSubject), nil
-            case []ast.Variable:
-                return len(typedSubject), nil
-            case map[string]ast.Variable:
-                return len(typedSubject), nil
-            }
-
-            return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
-        },
-    }
-}
-
-func interpolationFuncSignum() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeInt},
-        ReturnType: ast.TypeInt,
-        Variadic:   false,
-        Callback: func(args []interface{}) (interface{}, error) {
-            num := args[0].(int)
-            switch {
-            case num < 0:
-                return -1, nil
-            case num > 0:
-                return +1, nil
-            default:
-                return 0, nil
-            }
-        },
-    }
-}
-
-// interpolationFuncSlice returns a portion of the input list between from, inclusive and to, exclusive.
-func interpolationFuncSlice() ast.Function {
-    return ast.Function{
-        ArgTypes: []ast.Type{
-            ast.TypeList, // inputList
-            ast.TypeInt,  // from
-            ast.TypeInt,  // to
-        },
-        ReturnType: ast.TypeList,
-        Variadic:   false,
-        Callback: func(args []interface{}) (interface{}, error) {
-            inputList := args[0].([]ast.Variable)
-            from := args[1].(int)
-            to := args[2].(int)
-
-            if from < 0 {
-                return nil, fmt.Errorf("from index must be >= 0")
-            }
-            if to > len(inputList) {
-                return nil, fmt.Errorf("to index must be <= length of the input list")
-            }
-            if from > to {
-                return nil, fmt.Errorf("from index must be <= to index")
-            }
-
-            var outputList []ast.Variable
-            for i, val := range inputList {
-                if i >= from && i < to {
-                    outputList = append(outputList, val)
-                }
-            }
-            return outputList, nil
-        },
-    }
-}
-
-// interpolationFuncSort sorts a list of strings lexicographically
-func interpolationFuncSort() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeList},
-        ReturnType: ast.TypeList,
-        Variadic:   false,
-        Callback: func(args []interface{}) (interface{}, error) {
-            inputList := args[0].([]ast.Variable)
-
-            // Ensure that all the list members are strings and
-            // create a string slice from them
-            members := make([]string, len(inputList))
-            for i, val := range inputList {
-                if val.Type != ast.TypeString {
-                    return nil, fmt.Errorf(
-                        "sort() may only be used with lists of strings - %s at index %d",
-                        val.Type.String(), i)
-                }
-
-                members[i] = val.Value.(string)
-            }
-
-            sort.Strings(members)
-            return stringSliceToVariableValue(members), nil
-        },
-    }
-}
-
-// interpolationFuncSplit implements the "split" function that allows
-// strings to be split into multi-variable values
-func interpolationFuncSplit() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString},
-        ReturnType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            sep := args[0].(string)
-            s := args[1].(string)
-            elements := strings.Split(s, sep)
-            return stringSliceToVariableValue(elements), nil
-        },
-    }
-}
-
-// interpolationFuncLookup implements the "lookup" function that allows
-// dynamic lookups of map types within a Terraform configuration.
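Before the implementation, a dependency-free sketch of lookup's three-argument contract: the value when the key is present, the optional default when it is not, and an error when the key is missing and no default was given. (lookup here is an invented stand-in for illustration.)

package main

import "fmt"

func lookup(m map[string]string, key string, def ...string) (string, error) {
    if v, ok := m[key]; ok {
        return v, nil
    }
    if len(def) > 0 {
        return def[0], nil
    }
    return "", fmt.Errorf("lookup failed to find '%s'", key)
}

func main() {
    amis := map[string]string{"us-east-1": "ami-123"}
    v, _ := lookup(amis, "us-east-1")
    fmt.Println(v) // ami-123
    v, _ = lookup(amis, "eu-west-1", "ami-default")
    fmt.Println(v) // ami-default
}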
-func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeMap, ast.TypeString},
-        ReturnType:   ast.TypeString,
-        Variadic:     true,
-        VariadicType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            defaultValue := ""
-            defaultValueSet := false
-            if len(args) > 2 {
-                defaultValue = args[2].(string)
-                defaultValueSet = true
-            }
-            if len(args) > 3 {
-                return "", fmt.Errorf("lookup() takes no more than three arguments")
-            }
-            index := args[1].(string)
-            mapVar := args[0].(map[string]ast.Variable)
-
-            v, ok := mapVar[index]
-            if !ok {
-                if defaultValueSet {
-                    return defaultValue, nil
-                } else {
-                    return "", fmt.Errorf(
-                        "lookup failed to find '%s'",
-                        args[1].(string))
-                }
-            }
-            if v.Type != ast.TypeString {
-                return nil, fmt.Errorf(
-                    "lookup() may only be used with flat maps, this map contains elements of %s",
-                    v.Type.Printable())
-            }
-
-            return v.Value.(string), nil
-        },
-    }
-}
-
-// interpolationFuncElement implements the "element" function that allows
-// a specific index to be looked up in a multi-variable value. Note that this will
-// wrap if the index is larger than the number of elements in the multi-variable value.
-func interpolationFuncElement() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            list := args[0].([]ast.Variable)
-            if len(list) == 0 {
-                return nil, fmt.Errorf("element() may not be used with an empty list")
-            }
-
-            index, err := strconv.Atoi(args[1].(string))
-            if err != nil || index < 0 {
-                return "", fmt.Errorf(
-                    "invalid number for index, got %s", args[1])
-            }
-
-            resolvedIndex := index % len(list)
-
-            v := list[resolvedIndex]
-            if v.Type != ast.TypeString {
-                return nil, fmt.Errorf(
-                    "element() may only be used with flat lists, this list contains elements of %s",
-                    v.Type.Printable())
-            }
-            return v.Value, nil
-        },
-    }
-}
-
-// interpolationFuncKeys implements the "keys" function that yields a list of
-// keys of map types within a Terraform configuration.
-func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeMap},
-        ReturnType: ast.TypeList,
-        Callback: func(args []interface{}) (interface{}, error) {
-            mapVar := args[0].(map[string]ast.Variable)
-            keys := make([]string, 0)
-
-            for k := range mapVar {
-                keys = append(keys, k)
-            }
-
-            sort.Strings(keys)
-
-            // Keys are guaranteed to be strings
-            return stringSliceToVariableValue(keys), nil
-        },
-    }
-}
-
-// interpolationFuncValues implements the "values" function that yields a list of
-// values of map types within a Terraform configuration.
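The values implementation below orders its output by the sorted keys, so results are deterministic across runs. The same rule in plain Go:

package main

import (
    "fmt"
    "sort"
)

func values(m map[string]string) []string {
    keys := make([]string, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    out := make([]string, len(keys))
    for i, k := range keys {
        out[i] = m[k]
    }
    return out
}

func main() {
    fmt.Println(values(map[string]string{"b": "2", "a": "1"})) // [1 2]
}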
-func interpolationFuncValues(vs map[string]ast.Variable) ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeMap}, - ReturnType: ast.TypeList, - Callback: func(args []interface{}) (interface{}, error) { - mapVar := args[0].(map[string]ast.Variable) - keys := make([]string, 0) - - for k, _ := range mapVar { - keys = append(keys, k) - } - - sort.Strings(keys) - - values := make([]string, len(keys)) - for index, key := range keys { - if value, ok := mapVar[key].Value.(string); ok { - values[index] = value - } else { - return "", fmt.Errorf("values(): %q has element with bad type %s", - key, mapVar[key].Type) - } - } - - variable, err := hil.InterfaceToVariable(values) - if err != nil { - return nil, err - } - - return variable.Value, nil - }, - } -} - -// interpolationFuncBasename implements the "basename" function. -func interpolationFuncBasename() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return filepath.Base(args[0].(string)), nil - }, - } -} - -// interpolationFuncBase64Encode implements the "base64encode" function that -// allows Base64 encoding. -func interpolationFuncBase64Encode() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - s := args[0].(string) - return base64.StdEncoding.EncodeToString([]byte(s)), nil - }, - } -} - -// interpolationFuncBase64Decode implements the "base64decode" function that -// allows Base64 decoding. -func interpolationFuncBase64Decode() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - s := args[0].(string) - sDec, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", fmt.Errorf("failed to decode base64 data '%s'", s) - } - return string(sDec), nil - }, - } -} - -// interpolationFuncLower implements the "lower" function that does -// string lower casing. -func interpolationFuncLower() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - toLower := args[0].(string) - return strings.ToLower(toLower), nil - }, - } -} - -func interpolationFuncMd5() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - s := args[0].(string) - h := md5.New() - h.Write([]byte(s)) - hash := hex.EncodeToString(h.Sum(nil)) - return hash, nil - }, - } -} - -func interpolationFuncMerge() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeMap}, - ReturnType: ast.TypeMap, - Variadic: true, - VariadicType: ast.TypeMap, - Callback: func(args []interface{}) (interface{}, error) { - outputMap := make(map[string]ast.Variable) - - for _, arg := range args { - for k, v := range arg.(map[string]ast.Variable) { - outputMap[k] = v - } - } - - return outputMap, nil - }, - } -} - -// interpolationFuncUpper implements the "upper" function that does -// string upper casing. 
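The interpolationFuncMerge implementation above copies its argument maps left to right into one output map, so a key appearing in several arguments takes the value from the right-most one. A minimal sketch of that precedence rule:

package main

import "fmt"

func merge(maps ...map[string]string) map[string]string {
    out := make(map[string]string)
    for _, m := range maps {
        for k, v := range m {
            out[k] = v // later maps overwrite earlier ones
        }
    }
    return out
}

func main() {
    a := map[string]string{"region": "us-east-1", "env": "dev"}
    b := map[string]string{"env": "prod"}
    fmt.Println(merge(a, b)["env"]) // prod
}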
-func interpolationFuncUpper() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            toUpper := args[0].(string)
-            return strings.ToUpper(toUpper), nil
-        },
-    }
-}
-
-func interpolationFuncSha1() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            h := sha1.New()
-            h.Write([]byte(s))
-            hash := hex.EncodeToString(h.Sum(nil))
-            return hash, nil
-        },
-    }
-}
-
-// interpolationFuncSha256 returns the hexadecimal representation of the
-// sha256 sum
-func interpolationFuncSha256() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            h := sha256.New()
-            h.Write([]byte(s))
-            hash := hex.EncodeToString(h.Sum(nil))
-            return hash, nil
-        },
-    }
-}
-
-func interpolationFuncSha512() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            h := sha512.New()
-            h.Write([]byte(s))
-            hash := hex.EncodeToString(h.Sum(nil))
-            return hash, nil
-        },
-    }
-}
-
-func interpolationFuncTrimSpace() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            trimSpace := args[0].(string)
-            return strings.TrimSpace(trimSpace), nil
-        },
-    }
-}
-
-func interpolationFuncBase64Sha256() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            h := sha256.New()
-            h.Write([]byte(s))
-            shaSum := h.Sum(nil)
-            encoded := base64.StdEncoding.EncodeToString(shaSum[:])
-            return encoded, nil
-        },
-    }
-}
-
-func interpolationFuncBase64Sha512() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{ast.TypeString},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            s := args[0].(string)
-            h := sha512.New()
-            h.Write([]byte(s))
-            shaSum := h.Sum(nil)
-            encoded := base64.StdEncoding.EncodeToString(shaSum[:])
-            return encoded, nil
-        },
-    }
-}
-
-func interpolationFuncBcrypt() ast.Function {
-    return ast.Function{
-        ArgTypes:     []ast.Type{ast.TypeString},
-        Variadic:     true,
-        VariadicType: ast.TypeString,
-        ReturnType:   ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            defaultCost := 10
-
-            if len(args) > 1 {
-                costStr := args[1].(string)
-                cost, err := strconv.Atoi(costStr)
-                if err != nil {
-                    return "", err
-                }
-
-                defaultCost = cost
-            }
-
-            if len(args) > 2 {
-                return "", fmt.Errorf("bcrypt() takes no more than two arguments")
-            }
-
-            input := args[0].(string)
-            out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
-            if err != nil {
-                return "", fmt.Errorf("error occurred generating password %s", err.Error())
-            }
-
-            return string(out), nil
-        },
-    }
-}
-
-func interpolationFuncUUID() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{},
-        ReturnType: ast.TypeString,
-        Callback: func(args []interface{}) (interface{}, error) {
-            return uuid.GenerateUUID()
-        },
-    }
-}
-
-// interpolationFuncTimestamp implements the "timestamp" function that returns
-// the current UTC time formatted as RFC 3339.
-func interpolationFuncTimestamp() ast.Function {
-    return ast.Function{
-        ArgTypes:   []ast.Type{},
-        ReturnType:
ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return time.Now().UTC().Format(time.RFC3339), nil - }, - } -} - -// interpolationFuncTitle implements the "title" function that returns a copy of the -// string in which first characters of all the words are capitalized. -func interpolationFuncTitle() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - toTitle := args[0].(string) - return strings.Title(toTitle), nil - }, - } -} - -// interpolationFuncSubstr implements the "substr" function that allows strings -// to be truncated. -func interpolationFuncSubstr() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ - ast.TypeString, // input string - ast.TypeInt, // offset - ast.TypeInt, // length - }, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - str := args[0].(string) - offset := args[1].(int) - length := args[2].(int) - - // Interpret a negative offset as being equivalent to a positive - // offset taken from the end of the string. - if offset < 0 { - offset += len(str) - } - - // Interpret a length of `-1` as indicating that the substring - // should start at `offset` and continue until the end of the - // string. Any other negative length (other than `-1`) is invalid. - if length == -1 { - length = len(str) - } else if length >= 0 { - length += offset - } else { - return nil, fmt.Errorf("length should be a non-negative integer") - } - - if offset > len(str) { - return nil, fmt.Errorf("offset cannot be larger than the length of the string") - } - - if length > len(str) { - return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string") - } - - return str[offset:length], nil - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go deleted file mode 100644 index ead3d10..0000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ /dev/null @@ -1,283 +0,0 @@ -package config - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/reflectwalk" -) - -// interpolationWalker implements interfaces for the reflectwalk package -// (github.com/mitchellh/reflectwalk) that can be used to automatically -// execute a callback for an interpolation. -type interpolationWalker struct { - // F is the function to call for every interpolation. It can be nil. - // - // If Replace is true, then the return value of F will be used to - // replace the interpolation. - F interpolationWalkerFunc - Replace bool - - // ContextF is an advanced version of F that also receives the - // location of where it is in the structure. This lets you do - // context-aware validation. - ContextF interpolationWalkerContextFunc - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex []int - unknownKeys []string -} - -// interpolationWalkerFunc is the callback called by interpolationWalk. -// It is called with any interpolation found. It should return a value -// to replace the interpolation with, along with any errors. -// -// If Replace is set to false in interpolationWalker, then the replace -// value can be anything as it will have no effect. 
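As a hypothetical package-internal illustration of the callback contract described above: with Replace set, whatever F returns is written back over the interpolation that was found. The helper name and input value here are invented for the sketch; ast and reflectwalk are already imported by this file.

func redactInterpolations(raw map[string]interface{}) error {
    w := &interpolationWalker{
        F: func(ast.Node) (interface{}, error) {
            return "REDACTED", nil // replace every interpolation found
        },
        Replace: true,
    }
    return reflectwalk.Walk(raw, w)
}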
-type interpolationWalkerFunc func(ast.Node) (interface{}, error) - -// interpolationWalkerContextFunc is called by interpolationWalk if -// ContextF is set. This receives both the interpolation and the location -// where the interpolation is. -// -// This callback can be used to validate the location of the interpolation -// within the configuration. -type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) - -func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc - return nil -} - -func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None - - switch loc { - case reflectwalk.Map: - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - w.key = w.key[:len(w.key)-1] - w.csKey = w.csKey[:len(w.csKey)-1] - case reflectwalk.Slice: - // Split any values that need to be split - w.splitSlice() - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.SliceElem: - w.csKey = w.csKey[:len(w.csKey)-1] - w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1] - } - - return nil -} - -func (w *interpolationWalker) Map(m reflect.Value) error { - w.cs = append(w.cs, m) - return nil -} - -func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k - w.csKey = append(w.csKey, k) - - if l := len(w.sliceIndex); l > 0 { - w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String())) - } else { - w.key = append(w.key, k.String()) - } - - w.lastValue = v - return nil -} - -func (w *interpolationWalker) Slice(s reflect.Value) error { - w.cs = append(w.cs, s) - return nil -} - -func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { - w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = append(w.sliceIndex, i) - return nil -} - -func (w *interpolationWalker) Primitive(v reflect.Value) error { - setV := v - - // We only care about strings - if v.Kind() == reflect.Interface { - setV = v - v = v.Elem() - } - if v.Kind() != reflect.String { - return nil - } - - astRoot, err := hil.Parse(v.String()) - if err != nil { - return err - } - - // If the AST we got is just a literal string value with the same - // value then we ignore it. We have to check if its the same value - // because it is possible to input a string, get out a string, and - // have it be different. For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.ContextF != nil { - w.ContextF(w.loc, astRoot) - } - - if w.F == nil { - return nil - } - - replaceVal, err := w.F(astRoot) - if err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if w.Replace { - // We need to determine if we need to remove this element - // if the result contains any "UnknownVariableValue" which is - // set if it is computed. This behavior is different if we're - // splitting (in a SliceElem) or not. 
- remove := false - if w.loc == reflectwalk.SliceElem { - switch typedReplaceVal := replaceVal.(type) { - case string: - if typedReplaceVal == UnknownVariableValue { - remove = true - } - case []interface{}: - if hasUnknownValue(typedReplaceVal) { - remove = true - } - } - } else if replaceVal == UnknownVariableValue { - remove = true - } - - if remove { - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - } - - resultVal := reflect.ValueOf(replaceVal) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - // if we don't have at least 2 values, we're not going to find a map, but - // we could panic. - if len(w.cs) < 2 { - return - } - - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func hasUnknownValue(variable []interface{}) bool { - for _, value := range variable { - if strVal, ok := value.(string); ok { - if strVal == UnknownVariableValue { - return true - } - } - } - return false -} - -func (w *interpolationWalker) splitSlice() { - raw := w.cs[len(w.cs)-1] - - var s []interface{} - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - } - - split := false - for _, val := range s { - if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { - split = true - } - if _, ok := val.([]interface{}); ok { - split = true - } - } - - if !split { - return - } - - result := make([]interface{}, 0) - for _, v := range s { - switch val := v.(type) { - case ast.Variable: - switch val.Type { - case ast.TypeList: - elements := val.Value.([]ast.Variable) - for _, element := range elements { - result = append(result, element.Value) - } - default: - result = append(result, val.Value) - } - case []interface{}: - for _, element := range val { - result = append(result, element) - } - default: - result = append(result, v) - } - } - - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go deleted file mode 100644 index 890d30b..0000000 --- a/vendor/github.com/hashicorp/terraform/config/lang.go +++ /dev/null @@ -1,11 +0,0 @@ -package config - -import ( - "github.com/hashicorp/hil/ast" -) - -type noopNode struct{} - -func (n *noopNode) Accept(ast.Visitor) ast.Node { return n } -func (n *noopNode) Pos() ast.Pos { return ast.Pos{} } -func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go deleted file mode 100644 index 5dd7d46..0000000 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ /dev/null @@ -1,224 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - 
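splitSlice above expands list-typed HIL variables into their parent slice. A minimal sketch of that one-level flattening for plain []interface{} values, with illustrative names:

package main

import "fmt"

// flattenOnce expands any nested []interface{} elements into the parent
// slice, mirroring what splitSlice does for list-typed HIL variables.
func flattenOnce(s []interface{}) []interface{} {
	result := make([]interface{}, 0, len(s))
	for _, v := range s {
		if nested, ok := v.([]interface{}); ok {
			result = append(result, nested...)
		} else {
			result = append(result, v)
		}
	}
	return result
}

func main() {
	in := []interface{}{"a", []interface{}{"b", "c"}, "d"}
	fmt.Println(flattenOnce(in)) // [a b c d]
}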
"github.com/hashicorp/hcl" -) - -// ErrNoConfigsFound is the error returned by LoadDir if no -// Terraform configuration files were found in the given directory. -type ErrNoConfigsFound struct { - Dir string -} - -func (e ErrNoConfigsFound) Error() string { - return fmt.Sprintf( - "No Terraform configuration files found in directory: %s", - e.Dir) -} - -// LoadJSON loads a single Terraform configuration from a given JSON document. -// -// The document must be a complete Terraform configuration. This function will -// NOT try to load any additional modules so only the given document is loaded. -func LoadJSON(raw json.RawMessage) (*Config, error) { - obj, err := hcl.Parse(string(raw)) - if err != nil { - return nil, fmt.Errorf( - "Error parsing JSON document as HCL: %s", err) - } - - // Start building the result - hclConfig := &hclConfigurable{ - Root: obj, - } - - return hclConfig.Config() -} - -// LoadFile loads the Terraform configuration from a given file. -// -// This file can be any format that Terraform recognizes, and import any -// other format that Terraform recognizes. -func LoadFile(path string) (*Config, error) { - importTree, err := loadTree(path) - if err != nil { - return nil, err - } - - configTree, err := importTree.ConfigTree() - - // Close the importTree now so that we can clear resources as quickly - // as possible. - importTree.Close() - - if err != nil { - return nil, err - } - - return configTree.Flatten() -} - -// LoadDir loads all the Terraform configuration files in a single -// directory and appends them together. -// -// Special files known as "override files" can also be present, which -// are merged into the loaded configuration. That is, the non-override -// files are loaded first to create the configuration. Then, the overrides -// are merged into the configuration to create the final configuration. -// -// Files are loaded in lexical order. -func LoadDir(root string) (*Config, error) { - files, overrides, err := dirFiles(root) - if err != nil { - return nil, err - } - if len(files) == 0 { - return nil, &ErrNoConfigsFound{Dir: root} - } - - // Determine the absolute path to the directory. - rootAbs, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - var result *Config - - // Sort the files and overrides so we have a deterministic order - sort.Strings(files) - sort.Strings(overrides) - - // Load all the regular files, append them to each other. - for _, f := range files { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - if result != nil { - result, err = Append(result, c) - if err != nil { - return nil, err - } - } else { - result = c - } - } - - // Load all the overrides, and merge them into the config - for _, f := range overrides { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - result, err = Merge(result, c) - if err != nil { - return nil, err - } - } - - // Mark the directory - result.Dir = rootAbs - - return result, nil -} - -// IsEmptyDir returns true if the directory given has no Terraform -// configuration files. -func IsEmptyDir(root string) (bool, error) { - if _, err := os.Stat(root); err != nil && os.IsNotExist(err) { - return true, nil - } - - fs, os, err := dirFiles(root) - if err != nil { - return false, err - } - - return len(fs) == 0 && len(os) == 0, nil -} - -// Ext returns the Terraform configuration extension of the given -// path, or a blank string if it is an invalid function. 
-func ext(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -func dirFiles(dir string) ([]string, []string, error) { - f, err := os.Open(dir) - if err != nil { - return nil, nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, nil, err - } - if !fi.IsDir() { - return nil, nil, fmt.Errorf( - "configuration path must be a directory: %s", - dir) - } - - var files, overrides []string - err = nil - for err != io.EOF { - var fis []os.FileInfo - fis, err = f.Readdir(128) - if err != nil && err != io.EOF { - return nil, nil, err - } - - for _, fi := range fis { - // Ignore directories - if fi.IsDir() { - continue - } - - // Only care about files that are valid to load - name := fi.Name() - extValue := ext(name) - if extValue == "" || IsIgnoredFile(name) { - continue - } - - // Determine if we're dealing with an override - nameNoExt := name[:len(name)-len(extValue)] - override := nameNoExt == "override" || - strings.HasSuffix(nameNoExt, "_override") - - path := filepath.Join(dir, name) - if override { - overrides = append(overrides, path) - } else { - files = append(files, path) - } - } - } - - return files, overrides, nil -} - -// IsIgnoredFile returns true or false depending on whether the -// provided file name is a file that should be ignored. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go deleted file mode 100644 index bcd4d43..0000000 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ /dev/null @@ -1,1160 +0,0 @@ -package config - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" -) - -// hclConfigurable is an implementation of configurable that knows -// how to turn HCL configuration into a *Config object. -type hclConfigurable struct { - File string - Root *ast.File -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedProviderFields = []string{ - "alias", - "id", - "version", -} - -func (t *hclConfigurable) Config() (*Config, error) { - validKeys := map[string]struct{}{ - "atlas": struct{}{}, - "data": struct{}{}, - "module": struct{}{}, - "output": struct{}{}, - "provider": struct{}{}, - "resource": struct{}{}, - "terraform": struct{}{}, - "variable": struct{}{}, - } - - // Top-level item should be the object list - list, ok := t.Root.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("error parsing: file doesn't contain a root object") - } - - // Start building up the actual configuration. 
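One subtlety in IsIgnoredFile above: && binds tighter than ||, so the emacs check is a single combined "#...#" condition rather than two independent ones. A small standalone reproduction, with an illustrative name, to make the precedence visible:

package main

import (
	"fmt"
	"strings"
)

// isIgnored reproduces the rules above; explicit parentheses show that the
// emacs check ("#...#") is one combined condition.
func isIgnored(name string) bool {
	return strings.HasPrefix(name, ".") ||
		strings.HasSuffix(name, "~") ||
		(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#"))
}

func main() {
	for _, n := range []string{".hidden.tf", "main.tf~", "#main.tf#", "main.tf"} {
		fmt.Printf("%s -> %v\n", n, isIgnored(n))
	}
}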
- config := new(Config) - - // Terraform config - if o := list.Filter("terraform"); len(o.Items) > 0 { - var err error - config.Terraform, err = loadTerraformHcl(o) - if err != nil { - return nil, err - } - } - - // Build the variables - if vars := list.Filter("variable"); len(vars.Items) > 0 { - var err error - config.Variables, err = loadVariablesHcl(vars) - if err != nil { - return nil, err - } - } - - // Get Atlas configuration - if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { - var err error - config.Atlas, err = loadAtlasHcl(atlas) - if err != nil { - return nil, err - } - } - - // Build the modules - if modules := list.Filter("module"); len(modules.Items) > 0 { - var err error - config.Modules, err = loadModulesHcl(modules) - if err != nil { - return nil, err - } - } - - // Build the provider configs - if providers := list.Filter("provider"); len(providers.Items) > 0 { - var err error - config.ProviderConfigs, err = loadProvidersHcl(providers) - if err != nil { - return nil, err - } - } - - // Build the resources - { - var err error - managedResourceConfigs := list.Filter("resource") - dataResourceConfigs := list.Filter("data") - - config.Resources = make( - []*Resource, 0, - len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items), - ) - - managedResources, err := loadManagedResourcesHcl(managedResourceConfigs) - if err != nil { - return nil, err - } - dataResources, err := loadDataResourcesHcl(dataResourceConfigs) - if err != nil { - return nil, err - } - - config.Resources = append(config.Resources, dataResources...) - config.Resources = append(config.Resources, managedResources...) - } - - // Build the outputs - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - var err error - config.Outputs, err = loadOutputsHcl(outputs) - if err != nil { - return nil, err - } - } - - // Check for invalid keys - for _, item := range list.Items { - if len(item.Keys) == 0 { - // Not sure how this would happen, but let's avoid a panic - continue - } - - k := item.Keys[0].Token.Value().(string) - if _, ok := validKeys[k]; ok { - continue - } - - config.unknownKeys = append(config.unknownKeys, k) - } - - return config, nil -} - -// loadFileHcl is a fileLoaderFunc that knows how to read HCL -// files and turn them into hclConfigurables. -func loadFileHcl(root string) (configurable, []string, error) { - // Read the HCL file and prepare for parsing - d, err := ioutil.ReadFile(root) - if err != nil { - return nil, nil, fmt.Errorf( - "Error reading %s: %s", root, err) - } - - // Parse it - hclRoot, err := hcl.Parse(string(d)) - if err != nil { - return nil, nil, fmt.Errorf( - "Error parsing %s: %s", root, err) - } - - // Start building the result - result := &hclConfigurable{ - File: root, - Root: hclRoot, - } - - // Dive in, find the imports. This is disabled for now since - // imports were removed prior to Terraform 0.1. The code is - // remaining here commented for historical purposes. 
- /* - imports := obj.Get("import") - if imports == nil { - result.Object.Ref() - return result, nil, nil - } - - if imports.Type() != libucl.ObjectTypeString { - imports.Close() - - return nil, nil, fmt.Errorf( - "Error in %s: all 'import' declarations should be in the format\n"+ - "`import \"foo\"` (Got type %s)", - root, - imports.Type()) - } - - // Gather all the import paths - importPaths := make([]string, 0, imports.Len()) - iter := imports.Iterate(false) - for imp := iter.Next(); imp != nil; imp = iter.Next() { - path := imp.ToString() - if !filepath.IsAbs(path) { - // Relative paths are relative to the Terraform file itself - dir := filepath.Dir(root) - path = filepath.Join(dir, path) - } - - importPaths = append(importPaths, path) - imp.Close() - } - iter.Close() - imports.Close() - - result.Object.Ref() - */ - - return result, nil, nil -} - -// Given a handle to a HCL object, this transforms it into the Terraform config -func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'terraform' block allowed per module") - } - - // Get our one item - item := list.Items[0] - - // This block should have an empty top level ObjectItem. If there are keys - // here, it's likely because we have a flattened JSON object, and we can - // lift this into a nested ObjectList to decode properly. - if len(item.Keys) > 0 { - item = &ast.ObjectItem{ - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - } - - // We need the item value as an ObjectList - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("terraform block: should be an object") - } - - // NOTE: We purposely don't validate unknown HCL keys here so that - // we can potentially read _future_ Terraform version config (to - // still be able to validate the required version). - // - // We should still keep track of unknown keys to validate later, but - // HCL doesn't currently support that. - - var config Terraform - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading terraform config: %s", - err) - } - - // If we have provisioners, then parse those out - if os := listVal.Filter("backend"); len(os.Items) > 0 { - var err error - config.Backend, err = loadTerraformBackendHcl(os) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config for terraform block: %s", - err) - } - } - - return &config, nil -} - -// Loads the Backend configuration from an object list. 
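loadTerraformHcl above leans on hcl.DecodeObject to map parsed HCL into Go structs. Assuming the same HCL1 package, its string-level entry point hcl.Decode allows a self-contained sketch; the struct shapes here are illustrative and trimmed to required_version only:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// terraformBlock mirrors the shape decoded above, reduced to one field.
type terraformBlock struct {
	RequiredVersion string `hcl:"required_version"`
}

type root struct {
	Terraform terraformBlock `hcl:"terraform"`
}

func main() {
	src := `terraform { required_version = ">= 0.9" }`

	var cfg root
	if err := hcl.Decode(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Terraform.RequiredVersion) // >= 0.9
}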
-func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'backend' block allowed") - } - - // Get our one item - item := list.Items[0] - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'backend' must be followed by exactly one string: a type", - item.Pos()) - } - - typ := item.Keys[0].Token.Value().(string) - - // Decode the raw config - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - b := &Backend{ - Type: typ, - RawConfig: rawConfig, - } - b.Hash = b.Rehash() - - return b, nil -} - -// Given a handle to a HCL object, this transforms it into the Atlas -// configuration. -func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'atlas' block allowed") - } - - // Get our one item - item := list.Items[0] - - var config AtlasConfig - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading atlas config: %s", - err) - } - - return &config, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of modules. -// -// The resulting modules may not be unique, but each module -// represents exactly one module definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { - if err := assertAllBlocksHaveNames("module", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Module - - // Now go over all the types and their children in order to get - // all of the actual resources. - for _, item := range list.Items { - k := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - // Remove the fields we handle specially - delete(config, "source") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - // If we have a count, then figure it out - var source string - if o := listVal.Filter("source"); len(o.Items) > 0 { - err = hcl.DecodeObject(&source, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing source for %s: %s", - k, - err) - } - } - - result = append(result, &Module{ - Name: k, - Source: source, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// LoadOutputsHcl recurses into the given HCL object and turns -// it into a mapping of outputs. -func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { - if err := assertAllBlocksHaveNames("output", list); err != nil { - return nil, err - } - - list = list.Children() - - // Go through each object and turn it into an actual result. 
- result := make([]*Output, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("output '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Delete special keys - delete(config, "depends_on") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for output %s: %s", - n, - err) - } - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for output %q: %s", - n, - err) - } - } - - result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, - }) - } - - return result, nil -} - -// LoadVariablesHcl recurses into the given HCL object and turns -// it into a list of variables. -func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) { - if err := assertAllBlocksHaveNames("variable", list); err != nil { - return nil, err - } - - list = list.Children() - - // hclVariable is the structure each variable is decoded into - type hclVariable struct { - DeclaredType string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - // Go through each object and turn it into an actual result. - result := make([]*Variable, 0, len(list.Items)) - for _, item := range list.Items { - // Clean up items from JSON - unwrapHCLObjectKeysFromJSON(item, 1) - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'variable' must be followed by exactly one strings: a name", - item.Pos()) - } - - n := item.Keys[0].Token.Value().(string) - if !NameRegexp.MatchString(n) { - return nil, fmt.Errorf( - "position %s: 'variable' name must match regular expression: %s", - item.Pos(), NameRegexp) - } - - // Check for invalid keys - valid := []string{"type", "default", "description"} - if err := checkHCLKeys(item.Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "variable[%s]:", n)) - } - - // Decode into hclVariable to get typed values - var hclVar hclVariable - if err := hcl.DecodeObject(&hclVar, item.Val); err != nil { - return nil, err - } - - // Defaults turn into a slice of map[string]interface{} and - // we need to make sure to convert that down into the - // proper type for Config. - if ms, ok := hclVar.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - - hclVar.Default = def - } - - // Build the new variable and do some basic validation - newVar := &Variable{ - Name: n, - DeclaredType: hclVar.DeclaredType, - Default: hclVar.Default, - Description: hclVar.Description, - } - if err := newVar.ValidateTypeAndDefault(); err != nil { - return nil, err - } - - result = append(result, newVar) - } - - return result, nil -} - -// LoadProvidersHcl recurses into the given HCL object and turns -// it into a mapping of provider configs. 
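HCL/JSON decoding hands back map defaults as []map[string]interface{}, which loadVariablesHcl above collapses into a single map. The same normalization in isolation, with illustrative names:

package main

import "fmt"

// mergeDefaults collapses the []map[string]interface{} shape that HCL/JSON
// decoding produces for map defaults into a single map, as done above.
func mergeDefaults(ms []map[string]interface{}) map[string]interface{} {
	def := make(map[string]interface{})
	for _, m := range ms {
		for k, v := range m {
			def[k] = v
		}
	}
	return def
}

func main() {
	in := []map[string]interface{}{
		{"region": "us-east-1"},
		{"size": "t2.micro"},
	}
	fmt.Println(mergeDefaults(in)) // map[region:us-east-1 size:t2.micro]
}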
-func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { - if err := assertAllBlocksHaveNames("provider", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*ProviderConfig, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - delete(config, "alias") - delete(config, "version") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for provider config %s: %s", - n, - err) - } - - // If we have an alias field, then add those in - var alias string - if a := listVal.Filter("alias"); len(a.Items) > 0 { - err := hcl.DecodeObject(&alias, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading alias for provider[%s]: %s", - n, - err) - } - } - - // If we have a version field then extract it - var version string - if a := listVal.Filter("version"); len(a.Items) > 0 { - err := hcl.DecodeObject(&version, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading version for provider[%s]: %s", - n, - err) - } - } - - result = append(result, &ProviderConfig{ - Name: n, - Alias: alias, - Version: version, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of data sources. -// -// The resulting data sources may not be unique, but each one -// represents exactly one data definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - if err := assertAllBlocksHaveNames("data", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. 
- for _, item := range list.Items { - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: 'data' must be followed by exactly two strings: a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "depends_on") - delete(config, "provider") - delete(config, "count") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: DataResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provider: provider, - Provisioners: []*Provisioner{}, - DependsOn: dependsOn, - Lifecycle: ResourceLifecycle{}, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of managed resources. -// -// The resulting resources may not be unique, but each resource -// represents exactly one "resource" block in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. - for _, item := range list.Items { - // GH-4385: We detect a pure provisioner resource and give the user - // an error about how to do it cleanly. - if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" { - return nil, fmt.Errorf( - "position %s: provisioners in a resource should be wrapped in a list\n\n"+ - "Example: \"provisioner\": [ { \"local-exec\": ... 
} ]", - item.Pos()) - } - - // Fix up JSON input - unwrapHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: resource must be followed by exactly two strings, a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "connection") - delete(config, "count") - delete(config, "depends_on") - delete(config, "provisioner") - delete(config, "provider") - delete(config, "lifecycle") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have connection info, then parse those out - var connInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&connInfo, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading connection info for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have provisioners, then parse those out - var provisioners []*Provisioner - if os := listVal.Filter("provisioner"); len(os.Items) > 0 { - var err error - provisioners, err = loadProvisionersHcl(os, connInfo) - if err != nil { - return nil, fmt.Errorf( - "Error reading provisioners for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - // Check if the resource should be re-created before - // destroying the existing instance - var lifecycle ResourceLifecycle - if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { - if len(o.Items) > 1 { - return nil, fmt.Errorf( - "%s[%s]: Multiple lifecycle blocks found, expected one", - t, k) - } - - // Check for invalid keys - valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} - if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "%s[%s]:", t, k)) - } - - var raw map[string]interface{} - if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - - if err := 
mapstructure.WeakDecode(raw, &lifecycle); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: ManagedResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provisioners: provisioners, - Provider: provider, - DependsOn: dependsOn, - Lifecycle: lifecycle, - }) - } - - return result, nil -} - -func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { - if err := assertAllBlocksHaveNames("provisioner", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*Provisioner, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("provisioner '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Parse the "when" value - when := ProvisionerWhenCreate - if v, ok := config["when"]; ok { - switch v { - case "create": - when = ProvisionerWhenCreate - case "destroy": - when = ProvisionerWhenDestroy - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' when must be 'create' or 'destroy'", - item.Pos()) - } - } - - // Parse the "on_failure" value - onFailure := ProvisionerOnFailureFail - if v, ok := config["on_failure"]; ok { - switch v { - case "continue": - onFailure = ProvisionerOnFailureContinue - case "fail": - onFailure = ProvisionerOnFailureFail - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", - item.Pos()) - } - } - - // Delete fields we special case - delete(config, "connection") - delete(config, "when") - delete(config, "on_failure") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, err - } - - // Check if we have a provisioner-level connection - // block that overrides the resource-level - var subConnInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) - if err != nil { - return nil, err - } - } - - // Inherit from the resource connInfo any keys - // that are not explicitly overriden. - if connInfo != nil && subConnInfo != nil { - for k, v := range connInfo { - if _, ok := subConnInfo[k]; !ok { - subConnInfo[k] = v - } - } - } else if subConnInfo == nil { - subConnInfo = connInfo - } - - // Parse the connInfo - connRaw, err := NewRawConfig(subConnInfo) - if err != nil { - return nil, err - } - - result = append(result, &Provisioner{ - Type: n, - RawConfig: rawConfig, - ConnInfo: connRaw, - When: when, - OnFailure: onFailure, - }) - } - - return result, nil -} - -/* -func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { - objects := make(map[string][]*hclobj.Object) - - for _, o := range os.Elem(false) { - for _, elem := range o.Elem(true) { - val, ok := objects[elem.Key] - if !ok { - val = make([]*hclobj.Object, 0, 1) - } - - val = append(val, elem) - objects[elem.Key] = val - } - } - - return objects -} -*/ - -// assertAllBlocksHaveNames returns an error if any of the items in -// the given object list are blocks without keys (like "module {}") -// or simple assignments (like "module = 1"). 
It returns nil if -// neither of these things are true. -// -// The given name is used in any generated error messages, and should -// be the name of the block we're dealing with. The given list should -// be the result of calling .Filter on an object list with that same -// name. -func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { - if elem := list.Elem(); len(elem.Items) != 0 { - switch et := elem.Items[0].Val.(type) { - case *ast.ObjectType: - pos := et.Lbrace - return fmt.Errorf("%s: %q must be followed by a name", pos, name) - default: - pos := elem.Items[0].Val.Pos() - return fmt.Errorf("%s: %q must be a configuration block", pos, name) - } - } - return nil -} - -func checkHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf( - "invalid key: %s", key)) - } - } - - return result -} - -// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". -// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{ - &ast.ObjectItem{ - Keys: []*ast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go deleted file mode 100644 index db214be..0000000 --- a/vendor/github.com/hashicorp/terraform/config/merge.go +++ /dev/null @@ -1,193 +0,0 @@ -package config - -// Merge merges two configurations into a single configuration. -// -// Merge allows for the two configurations to have duplicate resources, -// because the resources will be merged. This differs from a single -// Config which must only have unique resources. -func Merge(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Merge unknown keys - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - // Merge Atlas configuration. This is a dumb one overrides the other - // sort of merge. 
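Merge above deduplicates unknownKeys by building a set while preserving first-seen order. The same union scheme in isolation, with illustrative names:

package main

import "fmt"

// union appends keys from both slices, keeping first-seen order and
// dropping duplicates, the same scheme used for unknownKeys above.
func union(a, b []string) []string {
	seen := make(map[string]struct{})
	var out []string
	for _, k := range append(append([]string{}, a...), b...) {
		if _, ok := seen[k]; !ok {
			seen[k] = struct{}{}
			out = append(out, k)
		}
	}
	return out
}

func main() {
	fmt.Println(union([]string{"a", "b"}, []string{"b", "c"})) // [a b c]
}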
- c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // Merge the Terraform configuration - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - // NOTE: Everything below is pretty gross. Due to the lack of generics - // in Go, there is some hoop-jumping involved to make this merging a - // little more test-friendly and less repetitive. Ironically, making it - // less repetitive involves being a little repetitive, but I prefer to - // be repetitive with things that are less error prone than things that - // are more error prone (more logic). Type conversions to an interface - // are pretty low-error. - - var m1, m2, mresult []merger - - // Modules - m1 = make([]merger, 0, len(c1.Modules)) - m2 = make([]merger, 0, len(c2.Modules)) - for _, v := range c1.Modules { - m1 = append(m1, v) - } - for _, v := range c2.Modules { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Modules = make([]*Module, len(mresult)) - for i, v := range mresult { - c.Modules[i] = v.(*Module) - } - } - - // Outputs - m1 = make([]merger, 0, len(c1.Outputs)) - m2 = make([]merger, 0, len(c2.Outputs)) - for _, v := range c1.Outputs { - m1 = append(m1, v) - } - for _, v := range c2.Outputs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Outputs = make([]*Output, len(mresult)) - for i, v := range mresult { - c.Outputs[i] = v.(*Output) - } - } - - // Provider Configs - m1 = make([]merger, 0, len(c1.ProviderConfigs)) - m2 = make([]merger, 0, len(c2.ProviderConfigs)) - for _, v := range c1.ProviderConfigs { - m1 = append(m1, v) - } - for _, v := range c2.ProviderConfigs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.ProviderConfigs = make([]*ProviderConfig, len(mresult)) - for i, v := range mresult { - c.ProviderConfigs[i] = v.(*ProviderConfig) - } - } - - // Resources - m1 = make([]merger, 0, len(c1.Resources)) - m2 = make([]merger, 0, len(c2.Resources)) - for _, v := range c1.Resources { - m1 = append(m1, v) - } - for _, v := range c2.Resources { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Resources = make([]*Resource, len(mresult)) - for i, v := range mresult { - c.Resources[i] = v.(*Resource) - } - } - - // Variables - m1 = make([]merger, 0, len(c1.Variables)) - m2 = make([]merger, 0, len(c2.Variables)) - for _, v := range c1.Variables { - m1 = append(m1, v) - } - for _, v := range c2.Variables { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Variables = make([]*Variable, len(mresult)) - for i, v := range mresult { - c.Variables[i] = v.(*Variable) - } - } - - return c, nil -} - -// merger is an interface that must be implemented by types that are -// merge-able. This simplifies the implementation of Merge for the various -// components of a Config. -type merger interface { - mergerName() string - mergerMerge(merger) merger -} - -// mergeSlice merges a slice of mergers. -func mergeSlice(m1, m2 []merger) []merger { - r := make([]merger, len(m1), len(m1)+len(m2)) - copy(r, m1) - - m := map[string]struct{}{} - for _, v2 := range m2 { - // If we already saw it, just append it because its a - // duplicate and invalid... 
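mergeSlice resolves name collisions by letting the second slice's entry override the first, in place. A simplified toy version of that name-keyed override (the duplicate-append edge case handled above is deliberately omitted, and kv is an illustrative stand-in for the merger interface):

package main

import "fmt"

// kv is a toy merger: items with the same name are merged by letting the
// second value override the first, as mergeSlice does for config blocks.
type kv struct{ name, value string }

func mergeByName(a, b []kv) []kv {
	out := append([]kv{}, a...)
	idx := make(map[string]int, len(out))
	for i, v := range out {
		idx[v.name] = i
	}
	for _, v := range b {
		if i, ok := idx[v.name]; ok {
			out[i] = v // override the original in place
		} else {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	a := []kv{{"region", "us-east-1"}, {"size", "small"}}
	b := []kv{{"size", "large"}, {"zone", "b"}}
	fmt.Println(mergeByName(a, b)) // [{region us-east-1} {size large} {zone b}]
}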
- name := v2.mergerName() - if _, ok := m[name]; ok { - r = append(r, v2) - continue - } - m[name] = struct{}{} - - // Find an original to override - var original merger - originalIndex := -1 - for i, v := range m1 { - if v.mergerName() == name { - originalIndex = i - original = v - break - } - } - - var v merger - if original == nil { - v = v2 - } else { - v = original.mergerMerge(v2) - } - - if originalIndex == -1 { - r = append(r, v) - } else { - r[originalIndex] = v - } - } - - return r -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go deleted file mode 100644 index 095f61d..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go +++ /dev/null @@ -1,114 +0,0 @@ -package module - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If we have a file, copy the contents. - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} - -// sameFile tried to determine if to paths are the same file. -// If the paths don't match, we lookup the inode on supported systems. -func sameFile(a, b string) (bool, error) { - if a == b { - return true, nil - } - - aIno, err := inode(a) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - bIno, err := inode(b) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - if aIno > 0 && aIno == bIno { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go deleted file mode 100644 index 96b4a63..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/get.go +++ /dev/null @@ -1,71 +0,0 @@ -package module - -import ( - "io/ioutil" - "os" - - "github.com/hashicorp/go-getter" -) - -// GetMode is an enum that describes how modules are loaded. -// -// GetModeLoad says that modules will not be downloaded or updated, they will -// only be loaded from the storage. 
-// -// GetModeGet says that modules can be initially downloaded if they don't -// exist, but otherwise to just load from the current version in storage. -// -// GetModeUpdate says that modules should be checked for updates and -// downloaded prior to loading. If there are no updates, we load the version -// from disk, otherwise we download first and then load. -type GetMode byte - -const ( - GetModeNone GetMode = iota - GetModeGet - GetModeUpdate -) - -// GetCopy is the same as Get except that it downloads a copy of the -// module represented by source. -// -// This copy will omit and dot-prefixed files (such as .git/, .hg/) and -// can't be updated on its own. -func GetCopy(dst, src string) error { - // Create the temporary directory to do the real Get to - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - // FIXME: This isn't completely safe. Creating and removing our temp path - // exposes where to race to inject files. - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - // Get to that temporary dir - if err := getter.Get(tmpDir, src); err != nil { - return err - } - - // Make sure the destination exists - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - - // Copy to the final location - return copyDir(dst, tmpDir) -} - -func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { - // Get the module with the level specified if we were told to. - if mode > GetModeNone { - if err := s.Get(key, src, mode == GetModeUpdate); err != nil { - return "", false, err - } - } - - // Get the directory where the module is. - return s.Dir(key) -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go deleted file mode 100644 index 8603ee2..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris - -package module - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go deleted file mode 100644 index 0d95730..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package module - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go deleted file mode 100644 index c0cf455..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package module - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string) (uint64, error) { - return 0, nil -} diff --git 
a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go deleted file mode 100644 index f8649f6..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/module.go +++ /dev/null @@ -1,7 +0,0 @@ -package module - -// Module represents the metadata for a single module. -type Module struct { - Name string - Source string -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go deleted file mode 100644 index fc9e733..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/testing.go +++ /dev/null @@ -1,38 +0,0 @@ -package module - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/hashicorp/go-getter" -) - -// TestTree loads a module at the given path and returns the tree as well -// as a function that should be deferred to clean up resources. -func TestTree(t *testing.T, path string) (*Tree, func()) { - // Create a temporary directory for module storage - dir, err := ioutil.TempDir("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - return nil, nil - } - - // Load the module - mod, err := NewTreeModule("", path) - if err != nil { - t.Fatalf("err: %s", err) - return nil, nil - } - - // Get the child modules - s := &getter.FolderStorage{StorageDir: dir} - if err := mod.Load(s, GetModeGet); err != nil { - t.Fatalf("err: %s", err) - return nil, nil - } - - return mod, func() { - os.RemoveAll(dir) - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go deleted file mode 100644 index 4b0b153..0000000 --- a/vendor/github.com/hashicorp/terraform/config/module/tree.go +++ /dev/null @@ -1,447 +0,0 @@ -package module - -import ( - "bufio" - "bytes" - "fmt" - "path/filepath" - "strings" - "sync" - - "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/config" -) - -// RootName is the name of the root tree. -const RootName = "root" - -// Tree represents the module import tree of configurations. -// -// This Tree structure can be used to get (download) new modules, load -// all the modules without getting, flatten the tree into something -// Terraform can use, etc. -type Tree struct { - name string - config *config.Config - children map[string]*Tree - path []string - lock sync.RWMutex -} - -// NewTree returns a new Tree for the given config structure. -func NewTree(name string, c *config.Config) *Tree { - return &Tree{config: c, name: name} -} - -// NewEmptyTree returns a new tree that is empty (contains no configuration). -func NewEmptyTree() *Tree { - t := &Tree{config: &config.Config{}} - - // We do this dummy load so that the tree is marked as "loaded". It - // should never fail because this is just about a no-op. If it does fail - // we panic so we can know its a bug. - if err := t.Load(nil, GetModeGet); err != nil { - panic(err) - } - - return t -} - -// NewTreeModule is like NewTree except it parses the configuration in -// the directory and gives it a specific name. Use a blank name "" to specify -// the root module. -func NewTreeModule(name, dir string) (*Tree, error) { - c, err := config.LoadDir(dir) - if err != nil { - return nil, err - } - - return NewTree(name, c), nil -} - -// Config returns the configuration for this module. -func (t *Tree) Config() *config.Config { - return t.config -} - -// Child returns the child with the given path (by name). 
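The Child method that follows resolves a path one name at a time by recursing into children. A stripped-down sketch of the same lookup on a toy tree, with illustrative names; note how Go's tolerance of method calls on a nil receiver makes missing paths return nil cleanly:

package main

import "fmt"

type node struct {
	name     string
	children map[string]*node
}

// child walks the tree one path element at a time, as Tree.Child does;
// a nil receiver or empty path terminates the recursion.
func (n *node) child(path []string) *node {
	if n == nil || len(path) == 0 {
		return n
	}
	return n.children[path[0]].child(path[1:])
}

func main() {
	root := &node{name: "root", children: map[string]*node{
		"a": {name: "a", children: map[string]*node{
			"b": {name: "b"},
		}},
	}}
	fmt.Println(root.child([]string{"a", "b"}).name) // b
	fmt.Println(root.child([]string{"x"}))           // <nil>
}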
-func (t *Tree) Child(path []string) *Tree { - if t == nil { - return nil - } - - if len(path) == 0 { - return t - } - - c := t.Children()[path[0]] - if c == nil { - return nil - } - - return c.Child(path[1:]) -} - -// Children returns the children of this tree (the modules that are -// imported by this root). -// -// This will only return a non-nil value after Load is called. -func (t *Tree) Children() map[string]*Tree { - t.lock.RLock() - defer t.lock.RUnlock() - return t.children -} - -// DeepEach calls the provided callback for the receiver and then all of -// its descendents in the tree, allowing an operation to be performed on -// all modules in the tree. -// -// Parents will be visited before their children but otherwise the order is -// not defined. -func (t *Tree) DeepEach(cb func(*Tree)) { - t.lock.RLock() - defer t.lock.RUnlock() - t.deepEach(cb) -} - -func (t *Tree) deepEach(cb func(*Tree)) { - cb(t) - for _, c := range t.children { - c.deepEach(cb) - } -} - -// Loaded says whether or not this tree has been loaded or not yet. -func (t *Tree) Loaded() bool { - t.lock.RLock() - defer t.lock.RUnlock() - return t.children != nil -} - -// Modules returns the list of modules that this tree imports. -// -// This is only the imports of _this_ level of the tree. To retrieve the -// full nested imports, you'll have to traverse the tree. -func (t *Tree) Modules() []*Module { - result := make([]*Module, len(t.config.Modules)) - for i, m := range t.config.Modules { - result[i] = &Module{ - Name: m.Name, - Source: m.Source, - } - } - - return result -} - -// Name returns the name of the tree. This will be "" for the root -// tree and then the module name given for any children. -func (t *Tree) Name() string { - if t.name == "" { - return RootName - } - - return t.name -} - -// Load loads the configuration of the entire tree. -// -// The parameters are used to tell the tree where to find modules and -// whether it can download/update modules along the way. -// -// Calling this multiple times will reload the tree. -// -// Various semantic-like checks are made along the way of loading since -// module trees inherently require the configuration to be in a reasonably -// sane state: no circular dependencies, proper module sources, etc. A full -// suite of validations can be done by running Validate (after loading). -func (t *Tree) Load(s getter.Storage, mode GetMode) error { - t.lock.Lock() - defer t.lock.Unlock() - - // Reset the children if we have any - t.children = nil - - modules := t.Modules() - children := make(map[string]*Tree) - - // Go through all the modules and get the directory for them. - for _, m := range modules { - if _, ok := children[m.Name]; ok { - return fmt.Errorf( - "module %s: duplicated. module names must be unique", m.Name) - } - - // Determine the path to this child - path := make([]string, len(t.path), len(t.path)+1) - copy(path, t.path) - path = append(path, m.Name) - - // Split out the subdir if we have one - source, subDir := getter.SourceDirSubdir(m.Source) - - source, err := getter.Detect(source, t.config.Dir, getter.Detectors) - if err != nil { - return fmt.Errorf("module %s: %s", m.Name, err) - } - - // Check if the detector introduced something new. 
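Within Load above, each module is cached under a storage key derived from its tree path and its raw source string. The key format in isolation, with an illustrative helper name:

package main

import (
	"fmt"
	"strings"
)

// storageKey reproduces the key format used above to cache a module on
// disk: "root." plus the dotted tree path plus the raw source string.
func storageKey(path []string, source string) string {
	return fmt.Sprintf("root.%s-%s", strings.Join(path, "."), source)
}

func main() {
	fmt.Println(storageKey([]string{"network", "subnets"}, "./modules/subnets"))
	// root.network.subnets-./modules/subnets
}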
- source, subDir2 := getter.SourceDirSubdir(source) - if subDir2 != "" { - subDir = filepath.Join(subDir2, subDir) - } - - // Get the directory where this module is so we can load it - key := strings.Join(path, ".") - key = fmt.Sprintf("root.%s-%s", key, m.Source) - dir, ok, err := getStorage(s, key, source, mode) - if err != nil { - return err - } - if !ok { - return fmt.Errorf( - "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) - } - - // If we have a subdirectory, then merge that in - if subDir != "" { - dir = filepath.Join(dir, subDir) - } - - // Load the configurations.Dir(source) - children[m.Name], err = NewTreeModule(m.Name, dir) - if err != nil { - return fmt.Errorf( - "module %s: %s", m.Name, err) - } - - // Set the path of this child - children[m.Name].path = path - } - - // Go through all the children and load them. - for _, c := range children { - if err := c.Load(s, mode); err != nil { - return err - } - } - - // Set our tree up - t.children = children - - return nil -} - -// Path is the full path to this tree. -func (t *Tree) Path() []string { - return t.path -} - -// String gives a nice output to describe the tree. -func (t *Tree) String() string { - var result bytes.Buffer - path := strings.Join(t.path, ", ") - if path != "" { - path = fmt.Sprintf(" (path: %s)", path) - } - result.WriteString(t.Name() + path + "\n") - - cs := t.Children() - if cs == nil { - result.WriteString(" not loaded") - } else { - // Go through each child and get its string value, then indent it - // by two. - for _, c := range cs { - r := strings.NewReader(c.String()) - scanner := bufio.NewScanner(r) - for scanner.Scan() { - result.WriteString(" ") - result.WriteString(scanner.Text()) - result.WriteString("\n") - } - } - } - - return result.String() -} - -// Validate does semantic checks on the entire tree of configurations. -// -// This will call the respective config.Config.Validate() functions as well -// as verifying things such as parameters/outputs between the various modules. -// -// Load must be called prior to calling Validate or an error will be returned. -func (t *Tree) Validate() error { - if !t.Loaded() { - return fmt.Errorf("tree must be loaded before calling Validate") - } - - // If something goes wrong, here is our error template - newErr := &treeError{Name: []string{t.Name()}} - - // Terraform core does not handle root module children named "root". - // We plan to fix this in the future but this bug was brought up in - // the middle of a release and we don't want to introduce wide-sweeping - // changes at that time. - if len(t.path) == 1 && t.name == "root" { - return fmt.Errorf("root module cannot contain module named 'root'") - } - - // Validate our configuration first. - if err := t.config.Validate(); err != nil { - newErr.Add(err) - } - - // If we're the root, we do extra validation. This validation usually - // requires the entire tree (since children don't have parent pointers). - if len(t.path) == 0 { - if err := t.validateProviderAlias(); err != nil { - newErr.Add(err) - } - } - - // Get the child trees - children := t.Children() - - // Validate all our children - for _, c := range children { - err := c.Validate() - if err == nil { - continue - } - - verr, ok := err.(*treeError) - if !ok { - // Unknown error, just return... 
-            return err
-        }
-
-        // Append ourselves to the error and then return
-        verr.Name = append(verr.Name, t.Name())
-        newErr.AddChild(verr)
-    }
-
-    // Go over all the modules and verify that any parameters are valid
-    // variables into the module in question.
-    for _, m := range t.config.Modules {
-        tree, ok := children[m.Name]
-        if !ok {
-            // This should never happen because Load watches us
-            panic("module not found in children: " + m.Name)
-        }
-
-        // Build the variables that the module defines
-        requiredMap := make(map[string]struct{})
-        varMap := make(map[string]struct{})
-        for _, v := range tree.config.Variables {
-            varMap[v.Name] = struct{}{}
-
-            if v.Required() {
-                requiredMap[v.Name] = struct{}{}
-            }
-        }
-
-        // Compare to the keys in our raw config for the module
-        for k, _ := range m.RawConfig.Raw {
-            if _, ok := varMap[k]; !ok {
-                newErr.Add(fmt.Errorf(
-                    "module %s: %s is not a valid parameter",
-                    m.Name, k))
-            }
-
-            // Remove it from the required set
-            delete(requiredMap, k)
-        }
-
-        // If we have any required left over, they aren't set.
-        for k, _ := range requiredMap {
-            newErr.Add(fmt.Errorf(
-                "module %s: required variable %q not set",
-                m.Name, k))
-        }
-    }
-
-    // Go over all the variables used and make sure that any module
-    // variables represent outputs properly.
-    for source, vs := range t.config.InterpolatedVariables() {
-        for _, v := range vs {
-            mv, ok := v.(*config.ModuleVariable)
-            if !ok {
-                continue
-            }
-
-            tree, ok := children[mv.Name]
-            if !ok {
-                newErr.Add(fmt.Errorf(
-                    "%s: undefined module referenced %s",
-                    source, mv.Name))
-                continue
-            }
-
-            found := false
-            for _, o := range tree.config.Outputs {
-                if o.Name == mv.Field {
-                    found = true
-                    break
-                }
-            }
-            if !found {
-                newErr.Add(fmt.Errorf(
-                    "%s: %s is not a valid output for module %s",
-                    source, mv.Field, mv.Name))
-            }
-        }
-    }
-
-    return newErr.ErrOrNil()
-}
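
As context for the Tree API being deleted above, a minimal hedged sketch of how callers typically drove it; the directory, storage path, and error handling are invented for illustration, and GetModeGet / getter.FolderStorage are assumed from this package and go-getter:

    tree, err := NewTreeModule("", "./example-config") // hypothetical root module directory
    if err != nil {
        log.Fatal(err)
    }
    storage := &getter.FolderStorage{StorageDir: ".terraform/modules"}
    if err := tree.Load(storage, GetModeGet); err != nil { // fetch and load child modules
        log.Fatal(err)
    }
    if err := tree.Validate(); err != nil { // the semantic checks defined above
        log.Fatal(err)
    }
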
-// treeError is an error used by Tree.Validate to accumulate all
-// validation errors.
-type treeError struct {
-    Name     []string
-    Errs     []error
-    Children []*treeError
-}
-
-func (e *treeError) Add(err error) {
-    e.Errs = append(e.Errs, err)
-}
-
-func (e *treeError) AddChild(err *treeError) {
-    e.Children = append(e.Children, err)
-}
-
-func (e *treeError) ErrOrNil() error {
-    if len(e.Errs) > 0 || len(e.Children) > 0 {
-        return e
-    }
-    return nil
-}
-
-func (e *treeError) Error() string {
-    name := strings.Join(e.Name, ".")
-    var out bytes.Buffer
-    fmt.Fprintf(&out, "module %s: ", name)
-
-    if len(e.Errs) == 1 {
-        // single-line error
-        out.WriteString(e.Errs[0].Error())
-    } else {
-        // multi-line error
-        for _, err := range e.Errs {
-            fmt.Fprintf(&out, "\n %s", err)
-        }
-    }
-
-    if len(e.Children) > 0 {
-        // start the next error on a new line
-        out.WriteString("\n ")
-    }
-    for _, child := range e.Children {
-        out.WriteString(child.Error())
-    }
-
-    return out.String()
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
deleted file mode 100644
index fcd37f4..0000000
--- a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package module
-
-import (
-    "bytes"
-    "encoding/gob"
-
-    "github.com/hashicorp/terraform/config"
-)
-
-func (t *Tree) GobDecode(bs []byte) error {
-    t.lock.Lock()
-    defer t.lock.Unlock()
-
-    // Decode the gob data
-    var data treeGob
-    dec := gob.NewDecoder(bytes.NewReader(bs))
-    if err := dec.Decode(&data); err != nil {
-        return err
-    }
-
-    // Set the fields
-    t.name = data.Name
-    t.config = data.Config
-    t.children = data.Children
-    t.path = data.Path
-
-    return nil
-}
-
-func (t *Tree) GobEncode() ([]byte, error) {
-    data := &treeGob{
-        Config:   t.config,
-        Children: t.children,
-        Name:     t.name,
-        Path:     t.path,
-    }
-
-    var buf bytes.Buffer
-    enc := gob.NewEncoder(&buf)
-    if err := enc.Encode(data); err != nil {
-        return nil, err
-    }
-
-    return buf.Bytes(), nil
-}
-
-// treeGob is used as a structure to Gob encode a tree.
-//
-// This structure is private so it can't be referenced but the fields are
-// public, allowing Gob to properly encode this. When we decode this, we are
-// able to turn it into a Tree.
-type treeGob struct {
-    Config   *config.Config
-    Children map[string]*Tree
-    Name     string
-    Path     []string
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
deleted file mode 100644
index 090d4f7..0000000
--- a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package module
-
-import (
-    "fmt"
-    "strings"
-
-    "github.com/hashicorp/go-multierror"
-    "github.com/hashicorp/terraform/dag"
-)
-
-// validateProviderAlias validates that all provider alias references are
-// defined at some point in the parent tree. This improves UX by catching
-// alias typos at the slight cost of requiring a declaration of usage. This
-// is usually a good tradeoff since not many aliases are used.
-func (t *Tree) validateProviderAlias() error {
-    // If we're not the root, don't perform this validation. We must be the
-    // root since we require full tree visibility.
-    if len(t.path) != 0 {
-        return nil
-    }
-
-    // We'll use a graph to keep track of defined aliases at each level.
-    // As long as a parent defines an alias, it is okay.
-    var g dag.AcyclicGraph
-    t.buildProviderAliasGraph(&g, nil)
-
-    // Go through the graph and check that the usage is all good.
- var err error - for _, v := range g.Vertices() { - pv, ok := v.(*providerAliasVertex) - if !ok { - // This shouldn't happen, just ignore it. - continue - } - - // If we're not using any aliases, fast track and just continue - if len(pv.Used) == 0 { - continue - } - - // Grab the ancestors since we're going to have to check if our - // parents define any of our aliases. - var parents []*providerAliasVertex - ancestors, _ := g.Ancestors(v) - for _, raw := range ancestors.List() { - if pv, ok := raw.(*providerAliasVertex); ok { - parents = append(parents, pv) - } - } - for k, _ := range pv.Used { - // Check if we define this - if _, ok := pv.Defined[k]; ok { - continue - } - - // Check for a parent - found := false - for _, parent := range parents { - _, found = parent.Defined[k] - if found { - break - } - } - if found { - continue - } - - // We didn't find the alias, error! - err = multierror.Append(err, fmt.Errorf( - "module %s: provider alias must be defined by the module or a parent: %s", - strings.Join(pv.Path, "."), k)) - } - } - - return err -} - -func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) { - // Add all our defined aliases - defined := make(map[string]struct{}) - for _, p := range t.config.ProviderConfigs { - defined[p.FullName()] = struct{}{} - } - - // Add all our used aliases - used := make(map[string]struct{}) - for _, r := range t.config.Resources { - if r.Provider != "" { - used[r.Provider] = struct{}{} - } - } - - // Add it to the graph - vertex := &providerAliasVertex{ - Path: t.Path(), - Defined: defined, - Used: used, - } - g.Add(vertex) - - // Connect to our parent if we have one - if parent != nil { - g.Connect(dag.BasicEdge(vertex, parent)) - } - - // Build all our children - for _, c := range t.Children() { - c.buildProviderAliasGraph(g, vertex) - } -} - -// providerAliasVertex is the vertex for the graph that keeps track of -// defined provider aliases. -type providerAliasVertex struct { - Path []string - Defined map[string]struct{} - Used map[string]struct{} -} diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go deleted file mode 100644 index 7a50782..0000000 --- a/vendor/github.com/hashicorp/terraform/config/providers.go +++ /dev/null @@ -1,103 +0,0 @@ -package config - -import "github.com/blang/semver" - -// ProviderVersionConstraint presents a constraint for a particular -// provider, identified by its full name. -type ProviderVersionConstraint struct { - Constraint string - ProviderType string -} - -// ProviderVersionConstraints is a map from provider full name to its associated -// ProviderVersionConstraint, as produced by Config.RequiredProviders. -type ProviderVersionConstraints map[string]ProviderVersionConstraint - -// RequiredProviders returns the ProviderVersionConstraints for this -// module. -// -// This includes both providers that are explicitly requested by provider -// blocks and those that are used implicitly by instantiating one of their -// resource types. In the latter case, the returned semver Range will -// accept any version of the provider. -func (c *Config) RequiredProviders() ProviderVersionConstraints { - ret := make(ProviderVersionConstraints, len(c.ProviderConfigs)) - - configs := c.ProviderConfigsByFullName() - - // In order to find the *implied* dependencies (those without explicit - // "provider" blocks) we need to walk over all of the resources and - // cross-reference with the provider configs. 
- for _, rc := range c.Resources { - providerName := rc.ProviderFullName() - var providerType string - - // Default to (effectively) no constraint whatsoever, but we might - // override if there's an explicit constraint in config. - constraint := ">=0.0.0" - - config, ok := configs[providerName] - if ok { - if config.Version != "" { - constraint = config.Version - } - providerType = config.Name - } else { - providerType = providerName - } - - ret[providerName] = ProviderVersionConstraint{ - ProviderType: providerType, - Constraint: constraint, - } - } - - return ret -} - -// RequiredRanges returns a semver.Range for each distinct provider type in -// the constraint map. If the same provider type appears more than once -// (e.g. because aliases are in use) then their respective constraints are -// combined such that they must *all* apply. -// -// The result of this method can be passed to the -// PluginMetaSet.ConstrainVersions method within the plugin/discovery -// package in order to filter down the available plugins to those which -// satisfy the given constraints. -// -// This function will panic if any of the constraints within cannot be -// parsed as semver ranges. This is guaranteed to never happen for a -// constraint set that was built from a configuration that passed validation. -func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range { - ret := make(map[string]semver.Range, len(cons)) - - for _, con := range cons { - spec := semver.MustParseRange(con.Constraint) - if existing, exists := ret[con.ProviderType]; exists { - ret[con.ProviderType] = existing.AND(spec) - } else { - ret[con.ProviderType] = spec - } - } - - return ret -} - -// ProviderConfigsByFullName returns a map from provider full names (as -// returned by ProviderConfig.FullName()) to the corresponding provider -// configs. -// -// This function returns no new information than what's already in -// c.ProviderConfigs, but returns it in a more convenient shape. If there -// is more than one provider config with the same full name then the result -// is undefined, but that is guaranteed not to happen for any config that -// has passed validation. -func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig { - ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs)) - - for _, pc := range c.ProviderConfigs { - ret[pc.FullName()] = pc - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go deleted file mode 100644 index 00fd43f..0000000 --- a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go +++ /dev/null @@ -1,40 +0,0 @@ -package config - -// ProvisionerWhen is an enum for valid values for when to run provisioners. -type ProvisionerWhen int - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -var provisionerWhenStrs = map[ProvisionerWhen]string{ - ProvisionerWhenInvalid: "invalid", - ProvisionerWhenCreate: "create", - ProvisionerWhenDestroy: "destroy", -} - -func (v ProvisionerWhen) String() string { - return provisionerWhenStrs[v] -} - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. 
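
Because these enums rely on plain map lookups rather than generated stringers, any value missing from the map renders as the empty string; a quick illustration:

    fmt.Println(ProvisionerWhenCreate) // "create"
    fmt.Println(ProvisionerWhen(42))   // "" (absent from provisionerWhenStrs)
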
-type ProvisionerOnFailure int - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{ - ProvisionerOnFailureInvalid: "invalid", - ProvisionerOnFailureContinue: "continue", - ProvisionerOnFailureFail: "fail", -} - -func (v ProvisionerOnFailure) String() string { - return provisionerOnFailureStrs[v] -} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go deleted file mode 100644 index f8498d8..0000000 --- a/vendor/github.com/hashicorp/terraform/config/raw_config.go +++ /dev/null @@ -1,335 +0,0 @@ -package config - -import ( - "bytes" - "encoding/gob" - "sync" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" -) - -// UnknownVariableValue is a sentinel value that can be used -// to denote that the value of a variable is unknown at this time. -// RawConfig uses this information to build up data about -// unknown keys. -const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -// RawConfig is a structure that holds a piece of configuration -// where the overall structure is unknown since it will be used -// to configure a plugin or some other similar external component. -// -// RawConfigs can be interpolated with variables that come from -// other resources, user variables, etc. -// -// RawConfig supports a query-like interface to request -// information from deep within the structure. -type RawConfig struct { - Key string - Raw map[string]interface{} - Interpolations []ast.Node - Variables map[string]InterpolatedVariable - - lock sync.Mutex - config map[string]interface{} - unknownKeys []string -} - -// NewRawConfig creates a new RawConfig structure and populates the -// publicly readable struct fields. -func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { - result := &RawConfig{Raw: raw} - if err := result.init(); err != nil { - return nil, err - } - - return result, nil -} - -// RawMap returns a copy of the RawConfig.Raw map. -func (r *RawConfig) RawMap() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - - m := make(map[string]interface{}) - for k, v := range r.Raw { - m[k] = v - } - return m -} - -// Copy returns a copy of this RawConfig, uninterpolated. -func (r *RawConfig) Copy() *RawConfig { - if r == nil { - return nil - } - - r.lock.Lock() - defer r.lock.Unlock() - - newRaw := make(map[string]interface{}) - for k, v := range r.Raw { - newRaw[k] = v - } - - result, err := NewRawConfig(newRaw) - if err != nil { - panic("copy failed: " + err.Error()) - } - - result.Key = r.Key - return result -} - -// Value returns the value of the configuration if this configuration -// has a Key set. If this does not have a Key set, nil will be returned. -func (r *RawConfig) Value() interface{} { - if c := r.Config(); c != nil { - if v, ok := c[r.Key]; ok { - return v - } - } - - r.lock.Lock() - defer r.lock.Unlock() - return r.Raw[r.Key] -} - -// Config returns the entire configuration with the variables -// interpolated from any call to Interpolate. -// -// If any interpolated variables are unknown (value set to -// UnknownVariableValue), the first non-container (map, slice, etc.) element -// will be removed from the config. The keys of unknown variables -// can be found using the UnknownKeys function. 
-// -// By pruning out unknown keys from the configuration, the raw -// structure will always successfully decode into its ultimate -// structure using something like mapstructure. -func (r *RawConfig) Config() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - return r.config -} - -// Interpolate uses the given mapping of variable values and uses -// those as the values to replace any variables in this raw -// configuration. -// -// Any prior calls to Interpolate are replaced with this one. -// -// If a variable key is missing, this will panic. -func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { - r.lock.Lock() - defer r.lock.Unlock() - - config := langEvalConfig(vs) - return r.interpolate(func(root ast.Node) (interface{}, error) { - // None of the variables we need are computed, meaning we should - // be able to properly evaluate. - result, err := hil.Eval(root, config) - if err != nil { - return "", err - } - - return result.Value, nil - }) -} - -// Merge merges another RawConfig into this one (overriding any conflicting -// values in this config) and returns a new config. The original config -// is not modified. -func (r *RawConfig) Merge(other *RawConfig) *RawConfig { - r.lock.Lock() - defer r.lock.Unlock() - - // Merge the raw configurations - raw := make(map[string]interface{}) - for k, v := range r.Raw { - raw[k] = v - } - for k, v := range other.Raw { - raw[k] = v - } - - // Create the result - result, err := NewRawConfig(raw) - if err != nil { - panic(err) - } - - // Merge the interpolated results - result.config = make(map[string]interface{}) - for k, v := range r.config { - result.config[k] = v - } - for k, v := range other.config { - result.config[k] = v - } - - // Build the unknown keys - if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 { - unknownKeys := make(map[string]struct{}) - for _, k := range r.unknownKeys { - unknownKeys[k] = struct{}{} - } - for _, k := range other.unknownKeys { - unknownKeys[k] = struct{}{} - } - - result.unknownKeys = make([]string, 0, len(unknownKeys)) - for k, _ := range unknownKeys { - result.unknownKeys = append(result.unknownKeys, k) - } - } - - return result -} - -func (r *RawConfig) init() error { - r.lock.Lock() - defer r.lock.Unlock() - - r.config = r.Raw - r.Interpolations = nil - r.Variables = nil - - fn := func(node ast.Node) (interface{}, error) { - r.Interpolations = append(r.Interpolations, node) - vars, err := DetectVariables(node) - if err != nil { - return "", err - } - - for _, v := range vars { - if r.Variables == nil { - r.Variables = make(map[string]InterpolatedVariable) - } - - r.Variables[v.FullKey()] = v - } - - return "", nil - } - - walker := &interpolationWalker{F: fn} - if err := reflectwalk.Walk(r.Raw, walker); err != nil { - return err - } - - return nil -} - -func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error { - config, err := copystructure.Copy(r.Raw) - if err != nil { - return err - } - r.config = config.(map[string]interface{}) - - w := &interpolationWalker{F: fn, Replace: true} - err = reflectwalk.Walk(r.config, w) - if err != nil { - return err - } - - r.unknownKeys = w.unknownKeys - return nil -} - -func (r *RawConfig) merge(r2 *RawConfig) *RawConfig { - if r == nil && r2 == nil { - return nil - } - - if r == nil { - r = &RawConfig{} - } - - rawRaw, err := copystructure.Copy(r.Raw) - if err != nil { - panic(err) - } - - raw := rawRaw.(map[string]interface{}) - if r2 != nil { - for k, v := range r2.Raw { - raw[k] = v - } - } - - result, err := 
NewRawConfig(raw) - if err != nil { - panic(err) - } - - return result -} - -// UnknownKeys returns the keys of the configuration that are unknown -// because they had interpolated variables that must be computed. -func (r *RawConfig) UnknownKeys() []string { - r.lock.Lock() - defer r.lock.Unlock() - return r.unknownKeys -} - -// See GobEncode -func (r *RawConfig) GobDecode(b []byte) error { - var data gobRawConfig - err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data) - if err != nil { - return err - } - - r.Key = data.Key - r.Raw = data.Raw - - return r.init() -} - -// GobEncode is a custom Gob encoder to use so that we only include the -// raw configuration. Interpolated variables and such are lost and the -// tree of interpolated variables is recomputed on decode, since it is -// referentially transparent. -func (r *RawConfig) GobEncode() ([]byte, error) { - r.lock.Lock() - defer r.lock.Unlock() - - data := gobRawConfig{ - Key: r.Key, - Raw: r.Raw, - } - - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(data); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type gobRawConfig struct { - Key string - Raw map[string]interface{} -} - -// langEvalConfig returns the evaluation configuration we use to execute. -func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig { - funcMap := make(map[string]ast.Function) - for k, v := range Funcs() { - funcMap[k] = v - } - funcMap["lookup"] = interpolationFuncLookup(vs) - funcMap["keys"] = interpolationFuncKeys(vs) - funcMap["values"] = interpolationFuncValues(vs) - - return &hil.EvalConfig{ - GlobalScope: &ast.BasicScope{ - VarMap: vs, - FuncMap: funcMap, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go deleted file mode 100644 index 877c6e8..0000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go -type ResourceMode int - -const ( - ManagedResourceMode ResourceMode = iota - DataResourceMode -) diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go deleted file mode 100644 index ea68b4f..0000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. - -package config - -import "fmt" - -const _ResourceMode_name = "ManagedResourceModeDataResourceMode" - -var _ResourceMode_index = [...]uint8{0, 19, 35} - -func (i ResourceMode) String() string { - if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return fmt.Sprintf("ResourceMode(%d)", i) - } - return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go deleted file mode 100644 index f7bfadd..0000000 --- a/vendor/github.com/hashicorp/terraform/config/testing.go +++ /dev/null @@ -1,15 +0,0 @@ -package config - -import ( - "testing" -) - -// TestRawConfig is used to create a RawConfig for testing. 
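
A hedged sketch of the RawConfig round trip removed in this file: construct, interpolate with hil values, then read the resolved map (the key and value below are invented):

    raw, err := NewRawConfig(map[string]interface{}{
        "ami": "${var.ami}",
    })
    if err != nil {
        log.Fatal(err)
    }
    // init has populated raw.Variables with the detected "var.ami".
    vars := map[string]ast.Variable{
        "var.ami": {Type: ast.TypeString, Value: "ami-123456"},
    }
    if err := raw.Interpolate(vars); err != nil {
        log.Fatal(err)
    }
    fmt.Println(raw.Config()["ami"]) // ami-123456
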
-func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig { - cfg, err := NewRawConfig(c) - if err != nil { - t.Fatalf("err: %s", err) - } - - return cfg -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go deleted file mode 100644 index f8776bc..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ /dev/null @@ -1,286 +0,0 @@ -package dag - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/go-multierror" -) - -// AcyclicGraph is a specialization of Graph that cannot have cycles. With -// this property, we get the property of sane graph traversal. -type AcyclicGraph struct { - Graph -} - -// WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) error - -// DepthWalkFunc is a walk function that also receives the current depth of the -// walk as an argument -type DepthWalkFunc func(Vertex, int) error - -func (g *AcyclicGraph) DirectedGraph() Grapher { - return g -} - -// Returns a Set that includes every Vertex yielded by walking down from the -// provided starting Vertex v. -func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.DownEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.DepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Returns a Set that includes every Vertex yielded by walking up from the -// provided starting Vertex v. -func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.UpEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Root returns the root of the DAG, or an error. -// -// Complexity: O(V) -func (g *AcyclicGraph) Root() (Vertex, error) { - roots := make([]Vertex, 0, 1) - for _, v := range g.Vertices() { - if g.UpEdges(v).Len() == 0 { - roots = append(roots, v) - } - } - - if len(roots) > 1 { - // TODO(mitchellh): make this error message a lot better - return nil, fmt.Errorf("multiple roots: %#v", roots) - } - - if len(roots) == 0 { - return nil, fmt.Errorf("no roots found") - } - - return roots[0], nil -} - -// TransitiveReduction performs the transitive reduction of graph g in place. -// The transitive reduction of a graph is a graph with as few edges as -// possible with the same reachability as the original graph. This means -// that if there are three nodes A => B => C, and A connects to both -// B and C, and B connects to C, then the transitive reduction is the -// same graph with only a single edge between A and B, and a single edge -// between B and C. -// -// The graph must be valid for this operation to behave properly. If -// Validate() returns an error, the behavior is undefined and the results -// will likely be unexpected. -// -// Complexity: O(V(V+E)), or asymptotically O(VE) -func (g *AcyclicGraph) TransitiveReduction() { - // For each vertex u in graph g, do a DFS starting from each vertex - // v such that the edge (u,v) exists (v is a direct descendant of u). - // - // For each v-prime reachable from v, remove the edge (u, v-prime). 
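    // Editorial worked example (not part of the original source): with edges
    // A->B, A->C and B->C, taking u = A starts the DFS at v = B; C is both
    // reachable from B and a direct target of A, so the shortcut edge A->C
    // is removed. Reachability is unchanged: A still reaches C through B.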
- defer g.debug.BeginOperation("TransitiveReduction", "").End("") - - for _, u := range g.Vertices() { - uTargets := g.DownEdges(u) - vs := AsVertexList(g.DownEdges(u)) - - g.DepthFirstWalk(vs, func(v Vertex, d int) error { - shared := uTargets.Intersection(g.DownEdges(v)) - for _, vPrime := range AsVertexList(shared) { - g.RemoveEdge(BasicEdge(u, vPrime)) - } - - return nil - }) - } -} - -// Validate validates the DAG. A DAG is valid if it has a single root -// with no cycles. -func (g *AcyclicGraph) Validate() error { - if _, err := g.Root(); err != nil { - return err - } - - // Look for cycles of more than 1 component - var err error - cycles := g.Cycles() - if len(cycles) > 0 { - for _, cycle := range cycles { - cycleStr := make([]string, len(cycle)) - for j, vertex := range cycle { - cycleStr[j] = VertexName(vertex) - } - - err = multierror.Append(err, fmt.Errorf( - "Cycle: %s", strings.Join(cycleStr, ", "))) - } - } - - // Look for cycles to self - for _, e := range g.Edges() { - if e.Source() == e.Target() { - err = multierror.Append(err, fmt.Errorf( - "Self reference: %s", VertexName(e.Source()))) - } - } - - return err -} - -func (g *AcyclicGraph) Cycles() [][]Vertex { - var cycles [][]Vertex - for _, cycle := range StronglyConnected(&g.Graph) { - if len(cycle) > 1 { - cycles = append(cycles, cycle) - } - } - return cycles -} - -// Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. Because the walk is done -// in parallel, the error returned will be a multierror. -func (g *AcyclicGraph) Walk(cb WalkFunc) error { - defer g.debug.BeginOperation(typeWalk, "").End("") - - w := &Walker{Callback: cb, Reverse: true} - w.Update(g) - return w.Wait() -} - -// simple convenience helper for converting a dag.Set to a []Vertex -func AsVertexList(s *Set) []Vertex { - rawList := s.List() - vertexList := make([]Vertex, len(rawList)) - for i, raw := range rawList { - vertexList[i] = raw.(Vertex) - } - return vertexList -} - -type vertexAtDepth struct { - Vertex Vertex - Depth int -} - -// depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. This is not exported now but it would make sense -// to export this publicly at some point. -func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. - targets := AsVertexList(g.DownEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start. 
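
A hedged sketch of driving the walk primitives above from outside the package, assuming Terraform's convention that down edges point from dependent to dependency:

    var g dag.AcyclicGraph
    g.Add("app")
    g.Add("db")
    g.Connect(dag.BasicEdge("app", "db")) // app depends on db
    err := g.Walk(func(v dag.Vertex) error {
        fmt.Println(dag.VertexName(v)) // expected to print "db" before "app"
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
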
-func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Add next set of targets in a consistent order. - targets := AsVertexList(g.UpEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// byVertexName implements sort.Interface so a list of Vertices can be sorted -// consistently by their VertexName -type byVertexName []Vertex - -func (b byVertexName) Len() int { return len(b) } -func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byVertexName) Less(i, j int) bool { - return VertexName(b[i]) < VertexName(b[j]) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go deleted file mode 100644 index 7e6d2af..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/dot.go +++ /dev/null @@ -1,282 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// DotOpts are the options for generating a dot formatted Graph. -type DotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int - - // use this to keep the cluster_ naming convention from the previous dot writer - cluster bool -} - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The Dot method will be called which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // Dot is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See GraphDotOpts below for details. - DotNode(string, *DotOpts) *DotNode -} - -// DotNode provides a structure for Vertices to return in order to specify their -// dot format. -type DotNode struct { - Name string - Attrs map[string]string -} - -// Returns the DOT representation of this Graph. 
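
A minimal hedged sketch of opting a vertex into dot output through the GraphNodeDotter interface above; the box type and its attribute are invented:

    type box struct{ name string }

    func (b *box) Name() string { return b.name }

    func (b *box) DotNode(title string, _ *dag.DotOpts) *dag.DotNode {
        return &dag.DotNode{
            Name:  title,
            Attrs: map[string]string{"shape": "box"},
        }
    }
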
-func (g *marshalGraph) Dot(opts *DotOpts) []byte {
-    if opts == nil {
-        opts = &DotOpts{
-            DrawCycles: true,
-            MaxDepth:   -1,
-            Verbose:    true,
-        }
-    }
-
-    var w indentWriter
-    w.WriteString("digraph {\n")
-    w.Indent()
-
-    // some dot defaults
-    w.WriteString(`compound = "true"` + "\n")
-    w.WriteString(`newrank = "true"` + "\n")
-
-    // the top level graph is written as the first subgraph
-    w.WriteString(`subgraph "root" {` + "\n")
-    g.writeBody(opts, &w)
-
-    // cluster isn't really used other than for naming purposes in some graphs
-    opts.cluster = opts.MaxDepth != 0
-    maxDepth := opts.MaxDepth
-    if maxDepth == 0 {
-        maxDepth = -1
-    }
-
-    for _, s := range g.Subgraphs {
-        g.writeSubgraph(s, opts, maxDepth, &w)
-    }
-
-    w.Unindent()
-    w.WriteString("}\n")
-    return w.Bytes()
-}
-
-func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
-    var buf bytes.Buffer
-    graphName := g.Name
-    if graphName == "" {
-        graphName = "root"
-    }
-
-    name := v.Name
-    attrs := v.Attrs
-    if v.graphNodeDotter != nil {
-        node := v.graphNodeDotter.DotNode(name, opts)
-        if node == nil {
-            return []byte{}
-        }
-
-        newAttrs := make(map[string]string)
-        for k, v := range attrs {
-            newAttrs[k] = v
-        }
-        for k, v := range node.Attrs {
-            newAttrs[k] = v
-        }
-
-        name = node.Name
-        attrs = newAttrs
-    }
-
-    buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
-    writeAttrs(&buf, attrs)
-    buf.WriteByte('\n')
-
-    return buf.Bytes()
-}
-
-func (e *marshalEdge) dot(g *marshalGraph) string {
-    var buf bytes.Buffer
-    graphName := g.Name
-    if graphName == "" {
-        graphName = "root"
-    }
-
-    sourceName := g.vertexByID(e.Source).Name
-    targetName := g.vertexByID(e.Target).Name
-    s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
-    buf.WriteString(s)
-    writeAttrs(&buf, e.Attrs)
-
-    return buf.String()
-}
-
-func cycleDot(e *marshalEdge, g *marshalGraph) string {
-    return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
-}
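
The caller-facing entry point is Graph.Dot, defined later in graph.go; a hedged usage sketch reusing the hypothetical box type from the previous sketch (vertices without a GraphNodeDotter are skipped, though their edges are still written):

    var g dag.Graph
    a := g.Add(&box{name: "A"})
    b := g.Add(&box{name: "B"})
    g.Connect(dag.BasicEdge(a, b))
    os.Stdout.Write(g.Dot(nil)) // nil opts fall back to the verbose defaults above
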
-// Write the subgraph body. This is recursive, and the depth argument is used to
-// record the current depth of iteration.
-func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
-    if depth == 0 {
-        return
-    }
-    depth--
-
-    name := sg.Name
-    if opts.cluster {
-        // we prefix with cluster_ to match the old dot output
-        name = "cluster_" + name
-        sg.Attrs["label"] = sg.Name
-    }
-    w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
-    sg.writeBody(opts, w)
-
-    for _, sg := range sg.Subgraphs {
-        g.writeSubgraph(sg, opts, depth, w)
-    }
-}
-
-func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
-    w.Indent()
-
-    for _, as := range attrStrings(g.Attrs) {
-        w.WriteString(as + "\n")
-    }
-
-    // list of Vertices that aren't to be included in the dot output
-    skip := map[string]bool{}
-
-    for _, v := range g.Vertices {
-        if v.graphNodeDotter == nil {
-            skip[v.ID] = true
-            continue
-        }
-
-        w.Write(v.dot(g, opts))
-    }
-
-    var dotEdges []string
-
-    if opts.DrawCycles {
-        for _, c := range g.Cycles {
-            if len(c) < 2 {
-                continue
-            }
-
-            for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
-                if j >= len(c) {
-                    j = 0
-                }
-                src := c[i]
-                tgt := c[j]
-
-                if skip[src.ID] || skip[tgt.ID] {
-                    continue
-                }
-
-                e := &marshalEdge{
-                    Name:   fmt.Sprintf("%s|%s", src.Name, tgt.Name),
-                    Source: src.ID,
-                    Target: tgt.ID,
-                    Attrs:  make(map[string]string),
-                }
-
-                dotEdges = append(dotEdges, cycleDot(e, g))
-                src = tgt
-            }
-        }
-    }
-
-    for _, e := range g.Edges {
-        dotEdges = append(dotEdges, e.dot(g))
-    }
-
-    // sort these again to match the old output
-    sort.Strings(dotEdges)
-
-    for _, e := range dotEdges {
-        w.WriteString(e + "\n")
-    }
-
-    w.Unindent()
-    w.WriteString("}\n")
-}
-
-func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
-    if len(attrs) > 0 {
-        buf.WriteString(" [")
-        buf.WriteString(strings.Join(attrStrings(attrs), ", "))
-        buf.WriteString("]")
-    }
-}
-
-func attrStrings(attrs map[string]string) []string {
-    strings := make([]string, 0, len(attrs))
-    for k, v := range attrs {
-        strings = append(strings, fmt.Sprintf("%s = %q", k, v))
-    }
-    sort.Strings(strings)
-    return strings
-}
-
-// Provide a bytes.Buffer like structure, which will indent when starting a
-// newline.
-type indentWriter struct {
-    bytes.Buffer
-    level int
-}
-
-func (w *indentWriter) indent() {
-    newline := []byte("\n")
-    if !bytes.HasSuffix(w.Bytes(), newline) {
-        return
-    }
-    for i := 0; i < w.level; i++ {
-        w.Buffer.WriteString("\t")
-    }
-}
-
-// Indent increases indentation by 1
-func (w *indentWriter) Indent() { w.level++ }
-
-// Unindent decreases indentation by 1
-func (w *indentWriter) Unindent() { w.level-- }
-
-// the following methods intercept the bytes.Buffer writes and insert the
-// indentation when starting a new line.
-func (w *indentWriter) Write(b []byte) (int, error) {
-    w.indent()
-    return w.Buffer.Write(b)
-}
-
-func (w *indentWriter) WriteString(s string) (int, error) {
-    w.indent()
-    return w.Buffer.WriteString(s)
-}
-func (w *indentWriter) WriteByte(b byte) error {
-    w.indent()
-    return w.Buffer.WriteByte(b)
-}
-func (w *indentWriter) WriteRune(r rune) (int, error) {
-    w.indent()
-    return w.Buffer.WriteRune(r)
-}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
deleted file mode 100644
index f0d99ee..0000000
--- a/vendor/github.com/hashicorp/terraform/dag/edge.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package dag
-
-import (
-    "fmt"
-)
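
BasicEdge simply pairs two vertices; a tiny illustration:

    e := dag.BasicEdge("A", "B")
    fmt.Println(e.Source(), e.Target()) // A B

-// Edge represents an edge in the graph, with a source and target vertex.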
-type Edge interface { - Source() Vertex - Target() Vertex - - Hashable -} - -// BasicEdge returns an Edge implementation that simply tracks the source -// and target given as-is. -func BasicEdge(source, target Vertex) Edge { - return &basicEdge{S: source, T: target} -} - -// basicEdge is a basic implementation of Edge that has the source and -// target vertex. -type basicEdge struct { - S, T Vertex -} - -func (e *basicEdge) Hashcode() interface{} { - return fmt.Sprintf("%p-%p", e.S, e.T) -} - -func (e *basicEdge) Source() Vertex { - return e.S -} - -func (e *basicEdge) Target() Vertex { - return e.T -} diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go deleted file mode 100644 index e7517a2..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/graph.go +++ /dev/null @@ -1,391 +0,0 @@ -package dag - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "sort" -) - -// Graph is used to represent a dependency graph. -type Graph struct { - vertices *Set - edges *Set - downEdges map[interface{}]*Set - upEdges map[interface{}]*Set - - // JSON encoder for recording debug information - debug *encoder -} - -// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. -type Subgrapher interface { - Subgraph() Grapher -} - -// A Grapher is any type that returns a Grapher, mainly used to identify -// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they -// return themselves. -type Grapher interface { - DirectedGraph() Grapher -} - -// Vertex of the graph. -type Vertex interface{} - -// NamedVertex is an optional interface that can be implemented by Vertex -// to give it a human-friendly name that is used for outputting the graph. -type NamedVertex interface { - Vertex - Name() string -} - -func (g *Graph) DirectedGraph() Grapher { - return g -} - -// Vertices returns the list of all the vertices in the graph. -func (g *Graph) Vertices() []Vertex { - list := g.vertices.List() - result := make([]Vertex, len(list)) - for i, v := range list { - result[i] = v.(Vertex) - } - - return result -} - -// Edges returns the list of all the edges in the graph. -func (g *Graph) Edges() []Edge { - list := g.edges.List() - result := make([]Edge, len(list)) - for i, v := range list { - result[i] = v.(Edge) - } - - return result -} - -// EdgesFrom returns the list of edges from the given source. -func (g *Graph) EdgesFrom(v Vertex) []Edge { - var result []Edge - from := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Source()) == from { - result = append(result, e) - } - } - - return result -} - -// EdgesTo returns the list of edges to the given target. -func (g *Graph) EdgesTo(v Vertex) []Edge { - var result []Edge - search := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Target()) == search { - result = append(result, e) - } - } - - return result -} - -// HasVertex checks if the given Vertex is present in the graph. -func (g *Graph) HasVertex(v Vertex) bool { - return g.vertices.Include(v) -} - -// HasEdge checks if the given Edge is present in the graph. -func (g *Graph) HasEdge(e Edge) bool { - return g.edges.Include(e) -} - -// Add adds a vertex to the graph. This is safe to call multiple time with -// the same Vertex. -func (g *Graph) Add(v Vertex) Vertex { - g.init() - g.vertices.Add(v) - g.debug.Add(v) - return v -} - -// Remove removes a vertex from the graph. This will also remove any -// edges with this vertex as a source or target. 
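
A hedged sketch of the basic Graph queries above (comparable vertices such as strings hash by value, so lookups by an equal value succeed):

    var g dag.Graph
    g.Add("A")
    g.Add("B")
    g.Connect(dag.BasicEdge("A", "B")) // Connect is defined below
    fmt.Println(g.HasVertex("A"))      // true
    fmt.Println(len(g.EdgesFrom("A"))) // 1
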
-func (g *Graph) Remove(v Vertex) Vertex { - // Delete the vertex itself - g.vertices.Delete(v) - g.debug.Remove(v) - - // Delete the edges to non-existent things - for _, target := range g.DownEdges(v).List() { - g.RemoveEdge(BasicEdge(v, target)) - } - for _, source := range g.UpEdges(v).List() { - g.RemoveEdge(BasicEdge(source, v)) - } - - return nil -} - -// Replace replaces the original Vertex with replacement. If the original -// does not exist within the graph, then false is returned. Otherwise, true -// is returned. -func (g *Graph) Replace(original, replacement Vertex) bool { - // If we don't have the original, we can't do anything - if !g.vertices.Include(original) { - return false - } - - defer g.debug.BeginOperation("Replace", "").End("") - - // If they're the same, then don't do anything - if original == replacement { - return true - } - - // Add our new vertex, then copy all the edges - g.Add(replacement) - for _, target := range g.DownEdges(original).List() { - g.Connect(BasicEdge(replacement, target)) - } - for _, source := range g.UpEdges(original).List() { - g.Connect(BasicEdge(source, replacement)) - } - - // Remove our old vertex, which will also remove all the edges - g.Remove(original) - - return true -} - -// RemoveEdge removes an edge from the graph. -func (g *Graph) RemoveEdge(edge Edge) { - g.init() - g.debug.RemoveEdge(edge) - - // Delete the edge from the set - g.edges.Delete(edge) - - // Delete the up/down edges - if s, ok := g.downEdges[hashcode(edge.Source())]; ok { - s.Delete(edge.Target()) - } - if s, ok := g.upEdges[hashcode(edge.Target())]; ok { - s.Delete(edge.Source()) - } -} - -// DownEdges returns the outward edges from the source Vertex v. -func (g *Graph) DownEdges(v Vertex) *Set { - g.init() - return g.downEdges[hashcode(v)] -} - -// UpEdges returns the inward edges to the destination Vertex v. -func (g *Graph) UpEdges(v Vertex) *Set { - g.init() - return g.upEdges[hashcode(v)] -} - -// Connect adds an edge with the given source and target. This is safe to -// call multiple times with the same value. Note that the same value is -// verified through pointer equality of the vertices, not through the -// value of the edge itself. -func (g *Graph) Connect(edge Edge) { - g.init() - g.debug.Connect(edge) - - source := edge.Source() - target := edge.Target() - sourceCode := hashcode(source) - targetCode := hashcode(target) - - // Do we have this already? If so, don't add it again. - if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { - return - } - - // Add the edge to the set - g.edges.Add(edge) - - // Add the down edge - s, ok := g.downEdges[sourceCode] - if !ok { - s = new(Set) - g.downEdges[sourceCode] = s - } - s.Add(target) - - // Add the up edge - s, ok = g.upEdges[targetCode] - if !ok { - s = new(Set) - g.upEdges[targetCode] = s - } - s.Add(source) -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) StringWithNodeTypes() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... 
- for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - targetNodes := make(map[string]Vertex) - for _, target := range targets.List() { - dep := VertexName(target) - deps = append(deps, dep) - targetNodes[dep] = target - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) - } - } - - return buf.String() -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) String() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... - for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s\n", name)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - for _, target := range targets.List() { - deps = append(deps, VertexName(target)) - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s\n", d)) - } - } - - return buf.String() -} - -func (g *Graph) init() { - if g.vertices == nil { - g.vertices = new(Set) - } - if g.edges == nil { - g.edges = new(Set) - } - if g.downEdges == nil { - g.downEdges = make(map[interface{}]*Set) - } - if g.upEdges == nil { - g.upEdges = make(map[interface{}]*Set) - } -} - -// Dot returns a dot-formatted representation of the Graph. -func (g *Graph) Dot(opts *DotOpts) []byte { - return newMarshalGraph("", g).Dot(opts) -} - -// MarshalJSON returns a JSON representation of the entire Graph. -func (g *Graph) MarshalJSON() ([]byte, error) { - dg := newMarshalGraph("root", g) - return json.MarshalIndent(dg, "", " ") -} - -// SetDebugWriter sets the io.Writer where the Graph will record debug -// information. After this is set, the graph will immediately encode itself to -// the stream, and continue to record all subsequent operations. -func (g *Graph) SetDebugWriter(w io.Writer) { - g.debug = &encoder{w: w} - g.debug.Encode(newMarshalGraph("root", g)) -} - -// DebugVertexInfo encodes arbitrary information about a vertex in the graph -// debug logs. -func (g *Graph) DebugVertexInfo(v Vertex, info string) { - va := newVertexInfo(typeVertexInfo, v, info) - g.debug.Encode(va) -} - -// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug -// logs. -func (g *Graph) DebugEdgeInfo(e Edge, info string) { - ea := newEdgeInfo(typeEdgeInfo, e, info) - g.debug.Encode(ea) -} - -// DebugVisitInfo records a visit to a Vertex during a walk operation. -func (g *Graph) DebugVisitInfo(v Vertex, info string) { - vi := newVertexInfo(typeVisitInfo, v, info) - g.debug.Encode(vi) -} - -// DebugOperation marks the start of a set of graph transformations in -// the debug log, and returns a DebugOperationEnd func, which marks the end of -// the operation in the log. Additional information can be added to the log via -// the info parameter. 
-// -// The returned func's End method allows this method to be called from a single -// defer statement: -// defer g.DebugOperationBegin("OpName", "operating").End("") -// -// The returned function must be called to properly close the logical operation -// in the logs. -func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { - return g.debug.BeginOperation(operation, info) -} - -// VertexName returns the name of a vertex. -func VertexName(raw Vertex) string { - switch v := raw.(type) { - case NamedVertex: - return v.Name() - case fmt.Stringer: - return fmt.Sprintf("%s", v) - default: - return fmt.Sprintf("%v", v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go deleted file mode 100644 index 16d5dd6..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ /dev/null @@ -1,462 +0,0 @@ -package dag - -import ( - "encoding/json" - "fmt" - "io" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - -// the marshal* structs are for serialization of the graph data. -type marshalGraph struct { - // Type is always "Graph", for identification as a top level object in the - // JSON stream. - Type string - - // Each marshal structure requires a unique ID so that it can be referenced - // by other structures. - ID string `json:",omitempty"` - - // Human readable name for this graph. - Name string `json:",omitempty"` - - // Arbitrary attributes that can be added to the output. - Attrs map[string]string `json:",omitempty"` - - // List of graph vertices, sorted by ID. - Vertices []*marshalVertex `json:",omitempty"` - - // List of edges, sorted by Source ID. - Edges []*marshalEdge `json:",omitempty"` - - // Any number of subgraphs. A subgraph itself is considered a vertex, and - // may be referenced by either end of an edge. - Subgraphs []*marshalGraph `json:",omitempty"` - - // Any lists of vertices that are included in cycles. - Cycles [][]*marshalVertex `json:",omitempty"` -} - -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - -func (g *marshalGraph) vertexByID(id string) *marshalVertex { - for _, v := range g.Vertices { - if id == v.ID { - return v - } - } - return nil -} - -type marshalVertex struct { - // Unique ID, used to reference this vertex from other structures. 
- ID string - - // Human readable name - Name string `json:",omitempty"` - - Attrs map[string]string `json:",omitempty"` - - // This is to help transition from the old Dot interfaces. We record if the - // node was a GraphNodeDotter here, so we can call it to get attributes. - graphNodeDotter GraphNodeDotter -} - -func newMarshalVertex(v Vertex) *marshalVertex { - dn, ok := v.(GraphNodeDotter) - if !ok { - dn = nil - } - - return &marshalVertex{ - ID: marshalVertexID(v), - Name: VertexName(v), - Attrs: make(map[string]string), - graphNodeDotter: dn, - } -} - -// vertices is a sort.Interface implementation for sorting vertices by ID -type vertices []*marshalVertex - -func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } -func (v vertices) Len() int { return len(v) } -func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -type marshalEdge struct { - // Human readable name - Name string - - // Source and Target Vertices by ID - Source string - Target string - - Attrs map[string]string `json:",omitempty"` -} - -func newMarshalEdge(e Edge) *marshalEdge { - return &marshalEdge{ - Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), - Source: marshalVertexID(e.Source()), - Target: marshalVertexID(e.Target()), - Attrs: make(map[string]string), - } -} - -// edges is a sort.Interface implementation for sorting edges by Source ID -type edges []*marshalEdge - -func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -// build a marshalGraph structure from a *Graph -func newMarshalGraph(name string, g *Graph) *marshalGraph { - mg := &marshalGraph{ - Type: "Graph", - Name: name, - Attrs: make(map[string]string), - } - - for _, v := range g.Vertices() { - id := marshalVertexID(v) - if sg, ok := marshalSubgrapher(v); ok { - smg := newMarshalGraph(VertexName(v), sg) - smg.ID = id - mg.Subgraphs = append(mg.Subgraphs, smg) - } - - mv := newMarshalVertex(v) - mg.Vertices = append(mg.Vertices, mv) - } - - sort.Sort(vertices(mg.Vertices)) - - for _, e := range g.Edges() { - mg.Edges = append(mg.Edges, newMarshalEdge(e)) - } - - sort.Sort(edges(mg.Edges)) - - for _, c := range (&AcyclicGraph{*g}).Cycles() { - var cycle []*marshalVertex - for _, v := range c { - mv := newMarshalVertex(v) - cycle = append(cycle, mv) - } - mg.Cycles = append(mg.Cycles, cycle) - } - - return mg -} - -// Attempt to return a unique ID for any vertex. -func marshalVertexID(v Vertex) string { - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return strconv.Itoa(int(val.Pointer())) - case reflect.Interface: - return strconv.Itoa(int(val.InterfaceData()[1])) - } - - if v, ok := v.(Hashable); ok { - h := v.Hashcode() - if h, ok := h.(string); ok { - return h - } - } - - // fallback to a name, which we hope is unique. - return VertexName(v) - - // we could try harder by attempting to read the arbitrary value from the - // interface, but we shouldn't get here from terraform right now. -} - -// check for a Subgrapher, and return the underlying *Graph. 
-func marshalSubgrapher(v Vertex) (*Graph, bool) {
-    sg, ok := v.(Subgrapher)
-    if !ok {
-        return nil, false
-    }
-
-    switch g := sg.Subgraph().DirectedGraph().(type) {
-    case *Graph:
-        return g, true
-    case *AcyclicGraph:
-        return &g.Graph, true
-    }
-
-    return nil, false
-}
-
-// The DebugOperationEnd func type provides a way to call an End function via a
-// method call, allowing for the chaining of methods in a defer statement.
-type DebugOperationEnd func(string)
-
-// End calls function e with the info parameter, marking the end of this
-// operation in the logs.
-func (e DebugOperationEnd) End(info string) { e(info) }
-
-// encoder provides methods to write debug data to an io.Writer, and is a noop
-// when no writer is present
-type encoder struct {
-    sync.Mutex
-    w io.Writer
-}
-
-// Encode is analogous to json.Encoder.Encode
-func (e *encoder) Encode(i interface{}) {
-    if e == nil || e.w == nil {
-        return
-    }
-    e.Lock()
-    defer e.Unlock()
-
-    js, err := json.Marshal(i)
-    if err != nil {
-        log.Println("[ERROR] dag:", err)
-        return
-    }
-    js = append(js, '\n')
-
-    _, err = e.w.Write(js)
-    if err != nil {
-        log.Println("[ERROR] dag:", err)
-        return
-    }
-}
-
-func (e *encoder) Add(v Vertex) {
-    e.Encode(marshalTransform{
-        Type:      typeTransform,
-        AddVertex: newMarshalVertex(v),
-    })
-}
-
-// Remove records the removal of Vertex v.
-func (e *encoder) Remove(v Vertex) {
-    e.Encode(marshalTransform{
-        Type:         typeTransform,
-        RemoveVertex: newMarshalVertex(v),
-    })
-}
-
-func (e *encoder) Connect(edge Edge) {
-    e.Encode(marshalTransform{
-        Type:    typeTransform,
-        AddEdge: newMarshalEdge(edge),
-    })
-}
-
-func (e *encoder) RemoveEdge(edge Edge) {
-    e.Encode(marshalTransform{
-        Type:       typeTransform,
-        RemoveEdge: newMarshalEdge(edge),
-    })
-}
-
-// BeginOperation marks the start of a set of graph transformations, and returns
-// an EndDebugOperation func to be called once the operation is complete.
-func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
-    if e == nil {
-        return func(string) {}
-    }
-
-    e.Encode(marshalOperation{
-        Type:  typeOperation,
-        Begin: op,
-        Info:  info,
-    })
-
-    return func(info string) {
-        e.Encode(marshalOperation{
-            Type: typeOperation,
-            End:  op,
-            Info: info,
-        })
-    }
-}
-
-// structure for recording graph transformations
-type marshalTransform struct {
-    // Type: "Transform"
-    Type         string
-    AddEdge      *marshalEdge   `json:",omitempty"`
-    RemoveEdge   *marshalEdge   `json:",omitempty"`
-    AddVertex    *marshalVertex `json:",omitempty"`
-    RemoveVertex *marshalVertex `json:",omitempty"`
-}
-
-func (t marshalTransform) Transform(g *marshalGraph) {
-    switch {
-    case t.AddEdge != nil:
-        g.connect(t.AddEdge)
-    case t.RemoveEdge != nil:
-        g.removeEdge(t.RemoveEdge)
-    case t.AddVertex != nil:
-        g.add(t.AddVertex)
-    case t.RemoveVertex != nil:
-        g.remove(t.RemoveVertex)
-    }
-}
-
-// this structure allows us to decode any object in the json stream for
-// inspection, then re-decode it into a proper struct if needed.
-type streamDecode struct {
-    Type string
-    Map  map[string]interface{}
-    JSON []byte
-}
-
-func (s *streamDecode) UnmarshalJSON(d []byte) error {
-    s.JSON = d
-    err := json.Unmarshal(d, &s.Map)
-    if err != nil {
-        return err
-    }
-
-    if t, ok := s.Map["Type"]; ok {
-        s.Type, _ = t.(string)
-    }
-    return nil
-}
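
Tying the encoder and its decoder together: a hedged sketch of capturing a debug stream and converting it back to dot via JSON2Dot, which is defined just below:

    var buf bytes.Buffer
    var g dag.Graph
    g.SetDebugWriter(&buf) // snapshot now, then record every operation
    g.Add("A")
    g.Add("B")
    g.Connect(dag.BasicEdge("A", "B"))

    dot, err := dag.JSON2Dot(&buf)
    if err != nil {
        log.Fatal(err)
    }
    os.Stdout.Write(dot)
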
-type marshalOperation struct { - Type string - Begin string `json:",omitempty"` - End string `json:",omitempty"` - Info string `json:",omitempty"` -} - -// decodeGraph decodes a marshalGraph from an encoded graph stream. -func decodeGraph(r io.Reader) (*marshalGraph, error) { - dec := json.NewDecoder(r) - - // a stream should always start with a graph - g := &marshalGraph{} - - err := dec.Decode(g) - if err != nil { - return nil, err - } - - // now replay any operations that occurred on the original graph - for dec.More() { - s := &streamDecode{} - err := dec.Decode(s) - if err != nil { - return g, err - } - - // the only Type we're concerned with here is Transform to complete the - // Graph - if s.Type != typeTransform { - continue - } - - t := &marshalTransform{} - err = json.Unmarshal(s.JSON, t) - if err != nil { - return g, err - } - t.Transform(g) - } - return g, nil -} - -// marshalVertexInfo allows encoding arbitrary information about a single -// Vertex in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalVertexInfo struct { - Type string - Vertex *marshalVertex - Info string -} - -func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo { - return &marshalVertexInfo{ - Type: infoType, - Vertex: newMarshalVertex(v), - Info: info, - } -} - -// marshalEdgeInfo allows encoding arbitrary information about a single -// Edge in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalEdgeInfo struct { - Type string - Edge *marshalEdge - Info string -} - -func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo { - return &marshalEdgeInfo{ - Type: infoType, - Edge: newMarshalEdge(e), - Info: info, - } -} - -// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final -// graph to dot format. -// -// TODO: Allow returning the output at a certain point during decode. -// Encode extra information from the json log into the Dot. -func JSON2Dot(r io.Reader) ([]byte, error) { - g, err := decodeGraph(r) - if err != nil { - return nil, err - } - - return g.Dot(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go deleted file mode 100644 index 92b4215..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/set.go +++ /dev/null @@ -1,123 +0,0 @@ -package dag - -import ( - "sync" -) - -// Set is a set data structure. -type Set struct { - m map[interface{}]interface{} - once sync.Once -} - -// Hashable is the interface used by set to get the hash code of a value. -// If this isn't given, then the value of the item being added to the set -// itself is used as the comparison value. -type Hashable interface { - Hashcode() interface{} -} - -// hashcode returns the hashcode used for set elements. -func hashcode(v interface{}) interface{} { - if h, ok := v.(Hashable); ok { - return h.Hashcode() - } - - return v -} - -// Add adds an item to the set -func (s *Set) Add(v interface{}) { - s.once.Do(s.init) - s.m[hashcode(v)] = v -} - -// Delete removes an item from the set. -func (s *Set) Delete(v interface{}) { - s.once.Do(s.init) - delete(s.m, hashcode(v)) -} - -// Include returns whether a value is in the set. -func (s *Set) Include(v interface{}) bool { - s.once.Do(s.init) - _, ok := s.m[hashcode(v)] - return ok -} - -// Intersection computes the set intersection with other.
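Because the Set keys its backing map by hashcode, duplicate adds collapse to a single element. A small self-contained sketch of that behavior (the inline map stands in for a dag.Set; Hashable is reproduced from this file):

package main

import "fmt"

// Hashable mirrors the interface above: values supply their own map key.
type Hashable interface{ Hashcode() interface{} }

// hashcode falls back to the value itself when it isn't Hashable.
func hashcode(v interface{}) interface{} {
	if h, ok := v.(Hashable); ok {
		return h.Hashcode()
	}
	return v
}

func main() {
	m := map[interface{}]interface{}{}
	add := func(v interface{}) { m[hashcode(v)] = v }

	add("a")
	add("a") // deduplicated: same hashcode, same key
	add("b")
	fmt.Println(len(m)) // 2
}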
-func (s *Set) Intersection(other *Set) *Set { - result := new(Set) - if s == nil { - return result - } - if other != nil { - for _, v := range s.m { - if other.Include(v) { - result.Add(v) - } - } - } - - return result -} - -// Difference returns a set with the elements that s has but -// other doesn't. -func (s *Set) Difference(other *Set) *Set { - result := new(Set) - if s != nil { - for k, v := range s.m { - var ok bool - if other != nil { - _, ok = other.m[k] - } - if !ok { - result.Add(v) - } - } - } - - return result -} - -// Filter returns a set that contains the elements from the receiver -// where the given callback returns true. -func (s *Set) Filter(cb func(interface{}) bool) *Set { - result := new(Set) - - for _, v := range s.m { - if cb(v) { - result.Add(v) - } - } - - return result -} - -// Len is the number of items in the set. -func (s *Set) Len() int { - if s == nil { - return 0 - } - - return len(s.m) -} - -// List returns the list of set elements. -func (s *Set) List() []interface{} { - if s == nil { - return nil - } - - r := make([]interface{}, 0, len(s.m)) - for _, v := range s.m { - r = append(r, v) - } - - return r -} - -func (s *Set) init() { - s.m = make(map[interface{}]interface{}) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go deleted file mode 100644 index 9d8b25c..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/tarjan.go +++ /dev/null @@ -1,107 +0,0 @@ -package dag - -// StronglyConnected returns the list of strongly connected components -// within the Graph g. This information is primarily used by this package -// for cycle detection, but strongly connected components have widespread -// use. -func StronglyConnected(g *Graph) [][]Vertex { - vs := g.Vertices() - acct := sccAcct{ - NextIndex: 1, - VertexIndex: make(map[Vertex]int, len(vs)), - } - for _, v := range vs { - // Recurse on any non-visited nodes - if acct.VertexIndex[v] == 0 { - stronglyConnected(&acct, g, v) - } - } - return acct.SCC -} - -func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int { - // Initial vertex visit - index := acct.visit(v) - minIdx := index - - for _, raw := range g.DownEdges(v).List() { - target := raw.(Vertex) - targetIdx := acct.VertexIndex[target] - - // Recurse on successor if not yet visited - if targetIdx == 0 { - minIdx = min(minIdx, stronglyConnected(acct, g, target)) - } else if acct.inStack(target) { - // Check if the vertex is in the stack - minIdx = min(minIdx, targetIdx) - } - } - - // Pop the strongly connected components off the stack if - // this is a root vertex - if index == minIdx { - var scc []Vertex - for { - v2 := acct.pop() - scc = append(scc, v2) - if v2 == v { - break - } - } - - acct.SCC = append(acct.SCC, scc) - } - - return minIdx -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -// sccAcct is used to pass around accounting information for -// the StronglyConnectedComponents algorithm -type sccAcct struct { - NextIndex int - VertexIndex map[Vertex]int - Stack []Vertex - SCC [][]Vertex -} - -// visit assigns an index and pushes a vertex onto the stack -func (s *sccAcct) visit(v Vertex) int { - idx := s.NextIndex - s.VertexIndex[v] = idx - s.NextIndex++ - s.push(v) - return idx -} - -// push adds a vertex to the stack -func (s *sccAcct) push(n Vertex) { - s.Stack = append(s.Stack, n) -} - -// pop removes a vertex from the stack -func (s *sccAcct) pop() Vertex { - n := len(s.Stack) - if n == 0 { - return nil - } - vertex := s.Stack[n-1]
- s.Stack = s.Stack[:n-1] - return vertex -} - -// inStack checks if a vertex is in the stack -func (s *sccAcct) inStack(needle Vertex) bool { - for _, n := range s.Stack { - if n == needle { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go deleted file mode 100644 index f03b100..0000000 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ /dev/null @@ -1,445 +0,0 @@ -package dag - -import ( - "errors" - "fmt" - "log" - "sync" - "time" - - "github.com/hashicorp/go-multierror" ) - -// Walker is used to walk every vertex of a graph in parallel. -// -// A vertex will only be walked when the dependencies of that vertex have -// been walked. If two vertices can be walked at the same time, they will be. -// -// Update can be called to update the graph. This can be called even during -// a walk, changing vertices/edges mid-walk. This should be done carefully. -// If a vertex is removed but has already been executed, the result of that -// execution (any error) is still returned by Wait. Changing or re-adding -// a vertex that has already executed has no effect. Changing edges of -// a vertex that has already executed has no effect. -// -// Non-parallelism can be enforced by introducing a lock in your callback -// function. However, the goroutine overhead of a walk will remain. -// Walker will create V*2 goroutines (one for each vertex, and one dependency -// waiter for each vertex). In general this should be of no concern unless -// there are a huge number of vertices. -// -// The walk is depth first by default. This can be changed with the Reverse -// option. -// -// A single walker is only valid for one graph walk. After the walk is complete -// you must construct a new walker to walk again. State for the walk is never -// deleted in case vertices or edges are changed. -type Walker struct { - // Callback is what is called for each vertex - Callback WalkFunc - - // Reverse, if true, causes the source of an edge to depend on a target. - // When false (default), the target depends on the source. - Reverse bool - - // changeLock must be held to modify any of the fields below. Only Update - // should modify these fields. Modifying them outside of Update can cause - // serious problems. - changeLock sync.Mutex - vertices Set - edges Set - vertexMap map[Vertex]*walkerVertex - - // wait is done when all vertices have executed. It may become "undone" - // if new vertices are added. - wait sync.WaitGroup - - // errMap contains the errors recorded so far for execution. Reading - // and writing should hold errLock. - errMap map[Vertex]error - errLock sync.Mutex -} - -type walkerVertex struct { - // These should only be set once on initialization and never written again. - // They are not protected by a lock since they don't need to be since - // they are write-once. - - // DoneCh is closed when this vertex has completed execution, regardless - // of success. - // - // CancelCh is closed when the vertex should cancel execution. If execution - // is already complete (DoneCh is closed), this has no effect. Otherwise, - // execution is cancelled as quickly as possible. - DoneCh chan struct{} - CancelCh chan struct{} - - // Dependency information. Any changes to any of these fields require - // holding DepsLock. - // - // DepsCh is sent a single value that denotes whether the upstream deps - // were successful (no errors). Any value sent means that the upstream - // dependencies are complete.
No other values will ever be sent again. - // - // DepsUpdateCh is closed when there is a new DepsCh set. - DepsCh chan bool - DepsUpdateCh chan struct{} - DepsLock sync.Mutex - - // Below is not safe to read/write in parallel. This behavior is - // enforced by changes only happening in Update. Nothing else should - // ever modify these. - deps map[Vertex]chan struct{} - depsCancelCh chan struct{} -} - -// errWalkUpstream is used in the errMap of a walk to note that an upstream -// dependency failed so this vertex wasn't run. This is not shown in the final -// user-returned error. -var errWalkUpstream = errors.New("upstream dependency failed") - -// Wait waits for the completion of the walk and returns any errors ( -// in the form of a multierror) that occurred. Update should be called -// to populate the walk with vertices and edges prior to calling this. -// -// Wait will return as soon as all currently known vertices are complete. -// If you plan on calling Update with more vertices in the future, you -// should not call Wait until after this is done. -func (w *Walker) Wait() error { - // Wait for completion - w.wait.Wait() - - // Grab the error lock - w.errLock.Lock() - defer w.errLock.Unlock() - - // Build the error - var result error - for v, err := range w.errMap { - if err != nil && err != errWalkUpstream { - result = multierror.Append(result, fmt.Errorf( - "%s: %s", VertexName(v), err)) - } - } - - return result -} - -// Update updates the currently executing walk with the given graph. -// This will perform a diff of the vertices and edges and update the walker. -// Already completed vertices remain completed (including any errors during -// their execution). -// -// This returns immediately once the walker is updated; it does not wait -// for completion of the walk. -// -// Multiple Updates can be called in parallel. Update can be called at any -// time during a walk. -func (w *Walker) Update(g *AcyclicGraph) { - var v, e *Set - if g != nil { - v, e = g.vertices, g.edges - } - - // Grab the change lock so no more updates happen but also so that - // no new vertices are executed during this time since we may be - // removing them. - w.changeLock.Lock() - defer w.changeLock.Unlock() - - // Initialize fields - if w.vertexMap == nil { - w.vertexMap = make(map[Vertex]*walkerVertex) - } - - // Calculate all our sets - newEdges := e.Difference(&w.edges) - oldEdges := w.edges.Difference(e) - newVerts := v.Difference(&w.vertices) - oldVerts := w.vertices.Difference(v) - - // Add the new vertices - for _, raw := range newVerts.List() { - v := raw.(Vertex) - - // Add to the waitgroup so our walk is not done until everything finishes - w.wait.Add(1) - - // Add to our own set so we know about it already - log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) - w.vertices.Add(raw) - - // Initialize the vertex info - info := &walkerVertex{ - DoneCh: make(chan struct{}), - CancelCh: make(chan struct{}), - deps: make(map[Vertex]chan struct{}), - } - - // Add it to the map and kick off the walk - w.vertexMap[v] = info - } - - // Remove the old vertices - for _, raw := range oldVerts.List() { - v := raw.(Vertex) - - // Get the vertex info so we can cancel it - info, ok := w.vertexMap[v] - if !ok { - // This vertex for some reason was never in our map. This - // shouldn't be possible. 
- continue - } - - // Cancel the vertex - close(info.CancelCh) - - // Delete it out of the map - delete(w.vertexMap, v) - - log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) - w.vertices.Delete(raw) - } - - // Add the new edges - var changedDeps Set - for _, raw := range newEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Get the info for the dep - depInfo, ok := w.vertexMap[dep] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Add the dependency to our waiter - waiterInfo.deps[dep] = depInfo.DoneCh - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: added edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Add(raw) - } - - // Process removed edges - for _, raw := range oldEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Delete the dependency from the waiter - delete(waiterInfo.deps, dep) - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: removed edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Delete(raw) - } - - // For each vertex with changed dependencies, we need to kick off - // a new waiter and notify the vertex of the changes. - for _, raw := range changedDeps.List() { - v := raw.(Vertex) - info, ok := w.vertexMap[v] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Create a new done channel - doneCh := make(chan bool, 1) - - // Create the channel we close for cancellation - cancelCh := make(chan struct{}) - - // Build a new deps copy - deps := make(map[Vertex]<-chan struct{}) - for k, v := range info.deps { - deps[k] = v - } - - // Update the update channel - info.DepsLock.Lock() - if info.DepsUpdateCh != nil { - close(info.DepsUpdateCh) - } - info.DepsCh = doneCh - info.DepsUpdateCh = make(chan struct{}) - info.DepsLock.Unlock() - - // Cancel the older waiter - if info.depsCancelCh != nil { - close(info.depsCancelCh) - } - info.depsCancelCh = cancelCh - - log.Printf( - "[TRACE] dag/walk: dependencies changed for %q, sending new deps", - VertexName(v)) - - // Start the waiter - go w.waitDeps(v, deps, doneCh, cancelCh) - } - - // Start all the new vertices. We do this at the end so that all - // the edge waiters and changes are set up above. - for _, raw := range newVerts.List() { - v := raw.(Vertex) - go w.walkVertex(v, w.vertexMap[v]) - } -} - -// edgeParts returns the waiter and the dependency, in that order. -// The waiter is waiting on the dependency. -func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { - if w.Reverse { - return e.Source(), e.Target() - } - - return e.Target(), e.Source() -} - -// walkVertex walks a single vertex, waiting for any dependencies before -// executing the callback. -func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { - // When we're done executing, lower the waitgroup count - defer w.wait.Done() - - // When we're done, always close our done channel - defer close(info.DoneCh) - - // Wait for our dependencies.
We create a [closed] deps channel so - // that we can immediately fall through to load our actual DepsCh. - var depsSuccess bool - var depsUpdateCh chan struct{} - depsCh := make(chan bool, 1) - depsCh <- true - close(depsCh) - for { - select { - case <-info.CancelCh: - // Cancel - return - - case depsSuccess = <-depsCh: - // Deps complete! Mark as nil to trigger completion handling. - depsCh = nil - - case <-depsUpdateCh: - // New deps, reloop - } - - // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occurring. - // In that case, we'd like to take into account new dependencies - // if possible. - info.DepsLock.Lock() - if info.DepsCh != nil { - depsCh = info.DepsCh - info.DepsCh = nil - } - if info.DepsUpdateCh != nil { - depsUpdateCh = info.DepsUpdateCh - } - info.DepsLock.Unlock() - - // If we still have no deps channel set, then we're done! - if depsCh == nil { - break - } - } - - // If we passed dependencies, we just want to check once more that - // we're not cancelled, since this can happen just as dependencies pass. - select { - case <-info.CancelCh: - // Cancelled during an update while dependencies completed. - return - default: - } - - // Run our callback or note that our upstream failed - var err error - if depsSuccess { - log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) - err = w.Callback(v) - } else { - log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) - err = errWalkUpstream - } - - // Record the error - if err != nil { - w.errLock.Lock() - defer w.errLock.Unlock() - - if w.errMap == nil { - w.errMap = make(map[Vertex]error) - } - w.errMap[v] = err - } -} - -func (w *Walker) waitDeps( - v Vertex, - deps map[Vertex]<-chan struct{}, - doneCh chan<- bool, - cancelCh <-chan struct{}) { - // For each dependency given to us, wait for it to complete - for dep, depCh := range deps { - DepSatisfied: - for { - select { - case <-depCh: - // Dependency satisfied! - break DepSatisfied - - case <-cancelCh: - // Wait cancelled. Note that we didn't satisfy dependencies - // so that anything waiting on us also doesn't run. - doneCh <- false - return - - case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", - VertexName(v), VertexName(dep)) - } - } - } - - // Dependencies satisfied! We need to check if any errored - w.errLock.Lock() - defer w.errLock.Unlock() - for dep, _ := range deps { - if w.errMap[dep] != nil { - // One of our dependencies failed, so return false - doneCh <- false - return - } - } - - // All dependencies satisfied and successful - doneCh <- true -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go deleted file mode 100644 index 1449065..0000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hil" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. 
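A hedged usage sketch of Expand (assuming the upstream github.com/hashicorp/terraform/flatmap import path; the flat map literal is hypothetical state data):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// Flat form, as stored in state: the ".#" key carries the list length.
	flat := map[string]string{
		"ports.#": "2",
		"ports.0": "80",
		"ports.1": "443",
	}

	// Expand reverses Flatten: "ports" comes back as a slice.
	fmt.Println(flatmap.Expand(flat, "ports")) // [80 443]
}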
-func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hil.UnknownValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. - keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. If we don't have a count, just proceed as if we have a map.
- if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go deleted file mode 100644 index 9ff6e42..0000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns it into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// See the tests for examples of what inputs are turned into. -func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." - - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go deleted file mode 100644 index 46b72c4..0000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix.
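The forward direction, under the same import assumption; note how the slice surfaces as indexed keys plus the ".#" count key that Expand's array handling above relies on:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	flat := flatmap.Flatten(map[string]interface{}{
		"name":  "web",
		"ports": []interface{}{80, 443},
	})

	// Slices flatten to indexed keys plus a "#" count key.
	fmt.Println(flat["ports.#"]) // 2
	fmt.Println(flat["ports.0"]) // 80
	fmt.Println(flat["name"])    // web
}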
-func (m Map) Delete(prefix string) { - for k, _ := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k, _ := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k, _ := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. -func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go deleted file mode 100644 index 9d31031..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package acctest contains helpers for Terraform Acceptance Tests -package acctest diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go deleted file mode 100644 index 50b4430..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go +++ /dev/null @@ -1,145 +0,0 @@ -package acctest - -import ( - "bytes" - crand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "math/rand" - "strings" - "time" - - "golang.org/x/crypto/ssh" -) - -// Helpers for generating random tidbits for use in identifiers to prevent -// collisions in acceptance tests. - -// RandInt generates a random integer -func RandInt() int { - reseed() - return rand.New(rand.NewSource(time.Now().UnixNano())).Int() } - -// RandomWithPrefix is used to generate a unique name with a prefix, for -// randomizing names in acceptance tests -func RandomWithPrefix(name string) string { - reseed() - return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) -} - -func RandIntRange(min int, max int) int { - reseed() - source := rand.New(rand.NewSource(time.Now().UnixNano())) - rangeMax := max - min - - return int(source.Int31n(int32(rangeMax))) -} - -// RandString generates a random alphanumeric string of the length specified -func RandString(strlen int) string { - return RandStringFromCharSet(strlen, CharSetAlphaNum) -} - -// RandStringFromCharSet generates a random string by selecting characters from -// the charset provided -func RandStringFromCharSet(strlen int, charSet string) string { - reseed() - result := make([]byte, strlen) - for i := 0; i < strlen; i++ { - result[i] = charSet[rand.Intn(len(charSet))] - } - return string(result) -} - -// RandSSHKeyPair generates a public and private SSH key pair. The public key is -// returned in OpenSSH format, and the private key is PEM encoded.
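A usage sketch for these randomization helpers (import path assumed; the prefix and lengths are arbitrary):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

func main() {
	// Randomized identifiers keep parallel acceptance tests from colliding.
	name := acctest.RandomWithPrefix("tf-test")
	token := acctest.RandStringFromCharSet(16, acctest.CharSetAlpha)
	fmt.Println(name, token)
}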
-func RandSSHKeyPair(comment string) (string, string, error) { - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) - if err != nil { - return "", "", err - } - keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) - return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil -} - -// RandTLSCert generates a self-signed TLS certificate with a newly created -// private key, and returns both the cert and the private key PEM encoded. -func RandTLSCert(orgName string) (string, string, error) { - template := &x509.Certificate{ - SerialNumber: big.NewInt(int64(RandInt())), - Subject: pkix.Name{ - Organization: []string{orgName}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(24 * time.Hour), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) - if err != nil { - return "", "", err - } - - certPEM, err := pemEncode(cert, "CERTIFICATE") - if err != nil { - return "", "", err - } - - return certPEM, privateKeyPEM, nil -} - -func genPrivateKey() (*rsa.PrivateKey, string, error) { - privateKey, err := rsa.GenerateKey(crand.Reader, 1024) - if err != nil { - return nil, "", err - } - - privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY") - if err != nil { - return nil, "", err - } - - return privateKey, privateKeyPEM, nil -} - -func pemEncode(b []byte, block string) (string, error) { - var buf bytes.Buffer - pb := &pem.Block{Type: block, Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return "", err - } - - return buf.String(), nil -} - -// Seeds random with current timestamp -func reseed() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -const ( - // CharSetAlphaNum is the alphanumeric character set for use with - // RandStringFromCharSet - CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" - - // CharSetAlpha is the alphabetical character set for use with - // RandStringFromCharSet - CharSetAlpha = "abcdefghijklmnopqrstuvwxyz" -) diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go deleted file mode 100644 index 87c60b8..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go +++ /dev/null @@ -1,27 +0,0 @@ -package acctest - -import ( - "net/http" - "os" - "testing" -) - -// SkipRemoteTestsEnvVar is an environment variable that can be set by a user -// running the tests in an environment with limited network connectivity. By -// default, tests requiring internet connectivity make an effort to skip if no -// internet is available, but in some cases the smoke test will pass even -// though the test should still be skipped. -const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS" - -// RemoteTestPrecheck is meant to be run by any unit test that requires -// outbound internet connectivity. The test will be skipped if it's -// unavailable. 
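A typical call site, as a sketch (the test body is hypothetical):

package acctest_test

import (
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
)

func TestFetchRemoteFixture(t *testing.T) {
	// Skips when TF_SKIP_REMOTE_TESTS is set or the connectivity smoke
	// test fails; otherwise the test proceeds normally.
	acctest.RemoteTestPrecheck(t)

	// ... test logic that needs outbound connectivity ...
}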
-func RemoteTestPrecheck(t *testing.T) { - if os.Getenv(SkipRemoteTestsEnvVar) != "" { - t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar) - } - - if _, err := http.Get("http://google.com"); err != nil { - t.Skipf("skipping, internet seems to not be available: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go deleted file mode 100644 index f470c9b..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/decode.go +++ /dev/null @@ -1,28 +0,0 @@ -package config - -import ( - "github.com/mitchellh/mapstructure" -) - -func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { - var md mapstructure.Metadata - decoderConfig := &mapstructure.DecoderConfig{ - Metadata: &md, - Result: target, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return nil, err - } - - for _, raw := range raws { - err := decoder.Decode(raw) - if err != nil { - return nil, err - } - } - - return &md, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go deleted file mode 100644 index 1a6e023..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/validator.go +++ /dev/null @@ -1,214 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/terraform" -) - -// Validator is a helper that helps you validate the configuration -// of your resource, resource provider, etc. -// -// At the most basic level, set the Required and Optional lists to be -// specifiers of keys that are required or optional. If a key shows up -// that isn't in one of these two lists, then an error is generated. -// -// The "specifiers" allowed in these lists form a fairly rich syntax to help -// describe the format of your configuration: -// -// * Basic keys are just strings. For example: "foo" will match the -// "foo" key. -// -// * Nested structure keys can be matched by doing -// "listener.*.foo". This will verify that there is at least one -// listener element that has the "foo" key set. -// -// * The existence of a nested structure can be checked by simply -// doing "listener.*" which will verify that there is at least -// one element in the "listener" structure. This is NOT -// validating that "listener" is an array. It is validating -// that it is a nested structure in the configuration. -// -type Validator struct { - Required []string - Optional []string -} - -func (v *Validator) Validate( - c *terraform.ResourceConfig) (ws []string, es []error) { - // Flatten the configuration so it is easier to reason about - flat := flatmap.Flatten(c.Raw) - - keySet := make(map[string]validatorKey) - for i, vs := range [][]string{v.Required, v.Optional} { - req := i == 0 - for _, k := range vs { - vk, err := newValidatorKey(k, req) - if err != nil { - es = append(es, err) - continue - } - - keySet[k] = vk - } - } - - purged := make([]string, 0) - for _, kv := range keySet { - p, w, e := kv.Validate(flat) - if len(w) > 0 { - ws = append(ws, w...) - } - if len(e) > 0 { - es = append(es, e...) - } - - purged = append(purged, p...) - } - - // Delete all the keys we processed in order to find - // the unknown keys.
- for _, p := range purged { - delete(flat, p) - } - - // The rest are unknown - for k, _ := range flat { - es = append(es, fmt.Errorf("Unknown configuration: %s", k)) - } - - return -} - -type validatorKey interface { - // Validate validates the given configuration and returns viewed keys, - // warnings, and errors. - Validate(map[string]string) ([]string, []string, []error) -} - -func newValidatorKey(k string, req bool) (validatorKey, error) { - var result validatorKey - - parts := strings.Split(k, ".") - if len(parts) > 1 && parts[1] == "*" { - result = &nestedValidatorKey{ - Parts: parts, - Required: req, - } - } else { - result = &basicValidatorKey{ - Key: k, - Required: req, - } - } - - return result, nil -} - -// basicValidatorKey validates keys that are basic such as "foo" -type basicValidatorKey struct { - Key string - Required bool -} - -func (v *basicValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - for k, _ := range m { - // If we have the exact key it's a match - if k == v.Key { - return []string{k}, nil, nil - } - } - - if !v.Required { - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", v.Key)} -} - -type nestedValidatorKey struct { - Parts []string - Required bool -} - -func (v *nestedValidatorKey) validate( - m map[string]string, - prefix string, - offset int) ([]string, []string, []error) { - if offset >= len(v.Parts) { - // We're at the end. Look for a specific key. - v2 := &basicValidatorKey{Key: prefix, Required: v.Required} - return v2.Validate(m) - } - - current := v.Parts[offset] - - // If we're at offset 0, special case to start at the next one. - if offset == 0 { - return v.validate(m, current, offset+1) - } - - // Determine if we're doing a "for all" or a specific key - if current != "*" { - // We're looking at a specific key, continue on. - return v.validate(m, prefix+"."+current, offset+1) - } - - // We're doing a "for all", so we loop over. - countStr, ok := m[prefix+".#"] - if !ok { - if !v.Required { - // It wasn't required, so it's no problem. - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", prefix)} - } - - count, err := strconv.ParseInt(countStr, 0, 0) - if err != nil { - // This shouldn't happen if flatmap works properly - panic("invalid flatmap array") - } - - var e []error - var w []string - u := make([]string, 1, count+1) - u[0] = prefix + ".#" - for i := 0; i < int(count); i++ { - prefix := fmt.Sprintf("%s.%d", prefix, i) - - // Mark that we saw this specific key - u = append(u, prefix) - - // Mark all prefixes of this - for k, _ := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - u = append(u, k) - } - - // If we have more parts, then validate deeper - if offset+1 < len(v.Parts) { - u2, w2, e2 := v.validate(m, prefix, offset+1) - - u = append(u, u2...) - w = append(w, w2...) - e = append(e, e2...) - } - } - - return u, w, e -} - -func (v *nestedValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - return v.validate(m, "", 0) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go deleted file mode 100644 index 18b8837..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go +++ /dev/null @@ -1,154 +0,0 @@ -// experiment package contains helper functions for tracking experimental -// features throughout Terraform.
-// -// This package should be used for creating, enabling, querying, and deleting -// experimental features. By unifying all of that onto a single interface, -// we can have the Go compiler help us by enforcing every place we touch -// an experimental feature. -// -// To create a new experiment: -// -// 1. Add the experiment to the global vars list below, prefixed with X_ -// -// 2. Add the experiment variable to the All list in the init() function -// -// 3. Use it! -// -// To remove an experiment: -// -// 1. Delete the experiment global var. -// -// 2. Try to compile and fix all the places where the var was referenced. -// -// To use an experiment: -// -// 1. Use Flag() if you want the experiment to be available from the CLI. -// -// 2. Use Enabled() to check whether it is enabled. -// -// As a general user: -// -// 1. The `-Xexperiment-name` flag -// 2. The `TF_X_` env var. -// 3. The `TF_X_FORCE` env var can be set to force an experimental feature -// without human verification. -// -package experiment - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "sync" -) - -// The experiments that are available are listed below. Any package in -// Terraform defining an experiment should define the experiments below. -// By keeping them all within the experiment package we force a single point -// of definition and use. This allows the compiler to enforce references -// so it becomes easy to remove the features. -var ( - // Shadow graph. This is already on by default. Disabling it will be - // allowed for a while in order for it to not block operations. - X_shadow = newBasicID("shadow", "SHADOW", false) -) - -// Global variables this package uses because we are a package -// with global state. -var ( - // all is the list of all experiments. Do not modify this. - All []ID - - // enabled keeps track of what flags have been enabled - enabled map[string]bool - enabledLock sync.Mutex - - // Hidden "experiment" that forces all others to be on without verification - x_force = newBasicID("force", "FORCE", false) -) - -func init() { - // The list of all experiments, update this when an experiment is added. - All = []ID{ - X_shadow, - x_force, - } - - // Load - reload() -} - -// reload is used by tests to reload the global state. This is called by -// init publicly. -func reload() { - // Initialize - enabledLock.Lock() - enabled = make(map[string]bool) - enabledLock.Unlock() - - // Set defaults and check env vars - for _, id := range All { - // Get the default value - def := id.Default() - - // If we set it in the env var, default it to true - key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env())) - if v := os.Getenv(key); v != "" { - def = v != "0" - } - - // Set the default - SetEnabled(id, def) - } -} - -// Enabled returns whether an experiment has been enabled or not. -func Enabled(id ID) bool { - enabledLock.Lock() - defer enabledLock.Unlock() - return enabled[id.Flag()] -} - -// SetEnabled sets an experiment to enabled/disabled. Please check with -// the experiment docs for when calling this actually affects the experiment. -func SetEnabled(id ID, v bool) { - enabledLock.Lock() - defer enabledLock.Unlock() - enabled[id.Flag()] = v -} - -// Force returns true if the -Xforce or TF_X_FORCE flag is present, which -// advises users of this package to not verify with the user that they want -// experimental behavior and to just continue with it. -func Force() bool { - return Enabled(x_force) -} - -// Flag configures the given FlagSet with the flags to configure -// all active experiments.
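A sketch of how a CLI could surface these experiments (flag names derive from each ID's Flag() value prefixed with X, per the implementation that follows):

package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/hashicorp/terraform/helper/experiment"
)

func main() {
	fs := flag.NewFlagSet("tf", flag.ExitOnError)
	experiment.Flag(fs) // registers -Xshadow, -Xforce, ...
	fs.Parse(os.Args[1:])

	fmt.Println("shadow enabled:", experiment.Enabled(experiment.X_shadow))
}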
-func Flag(fs *flag.FlagSet) { - for _, id := range All { - desc := id.Flag() - key := fmt.Sprintf("X%s", id.Flag()) - fs.Var(&idValue{X: id}, key, desc) - } -} - -// idValue implements flag.Value for setting the enabled/disabled state -// of an experiment from the CLI. -type idValue struct { - X ID -} - -func (v *idValue) IsBoolFlag() bool { return true } -func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) } -func (v *idValue) Set(raw string) error { - b, err := strconv.ParseBool(raw) - if err == nil { - SetEnabled(v.X, b) - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go deleted file mode 100644 index 8e2f707..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go +++ /dev/null @@ -1,34 +0,0 @@ -package experiment - -// ID represents an experimental feature. -// -// The global vars defined on this package should be used as ID values. -// This interface is purposely not implement-able outside of this package -// so that we can rely on the Go compiler to enforce all experiment references. -type ID interface { - Env() string - Flag() string - Default() bool - - unexported() // So the ID can't be implemented externally. -} - -// basicID implements ID. -type basicID struct { - EnvValue string - FlagValue string - DefaultValue bool -} - -func newBasicID(flag, env string, def bool) ID { - return &basicID{ - EnvValue: env, - FlagValue: flag, - DefaultValue: def, - } -} - -func (id *basicID) Env() string { return id.EnvValue } -func (id *basicID) Flag() string { return id.FlagValue } -func (id *basicID) Default() bool { return id.DefaultValue } -func (id *basicID) unexported() {} diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go deleted file mode 100644 index 64d8263..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go +++ /dev/null @@ -1,22 +0,0 @@ -package hashcode - -import ( - "hash/crc32" -) - -// String hashes a string to a unique hashcode. -// -// crc32 returns a uint32, but for our use we need -// a non-negative integer. Here we cast to an integer -// and invert it if the result is negative. -func String(s string) int { - v := int(crc32.ChecksumIEEE([]byte(s))) - if v >= 0 { - return v - } - if -v >= 0 { - return -v - } - // v == MinInt - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go deleted file mode 100644 index 67be1df..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go +++ /dev/null @@ -1,41 +0,0 @@ -package hilmapstructure - -import ( - "fmt" - "reflect" - - "github.com/mitchellh/mapstructure" -) - -var hilMapstructureDecodeHookEmptySlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookEmptyMap map[string]interface{} - -// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a -// DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists.
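A sketch of the behavior difference the hook enforces (import path assumed):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hilmapstructure"
)

func main() {
	// Weak typing still applies: the string "80" decodes into an int field.
	var out struct{ Port int }
	if err := hilmapstructure.WeakDecode(map[string]interface{}{"Port": "80"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Port) // 80

	// But an empty list no longer silently becomes an empty map.
	var m map[string]interface{}
	err := hilmapstructure.WeakDecode([]interface{}{}, &m)
	fmt.Println(err) // Cannot convert a []interface{} into a map[string]interface{}
}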
-func WeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}") - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go deleted file mode 100644 index 433cd77..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go +++ /dev/null @@ -1,100 +0,0 @@ -package logging - -import ( - "io" - "io/ioutil" - "log" - "os" - "strings" - "syscall" - - "github.com/hashicorp/logutils" -) - -// These are the environmental variables that determine if we log, and if -// we log whether or not the log should go to a file. -const ( - EnvLog = "TF_LOG" // Set to True - EnvLogFile = "TF_LOG_PATH" // Set to a file -) - -var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} - -// LogOutput determines where we should send logs (if anywhere) and the log level. -func LogOutput() (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := LogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - if logPath := os.Getenv(EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logutils.LevelFilter{ - Levels: validLevels, - MinLevel: logutils.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - -// SetOutput checks for a log destination with LogOutput, and calls -// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses -// ioutil.Discard. Any error from LogOutput is fatal. -func SetOutput() { - out, err := LogOutput() - if err != nil { - log.Fatal(err) - } - - if out == nil { - out = ioutil.Discard - } - - log.SetOutput(out) -} - -// LogLevel returns the current log level string based on the environment vars -func LogLevel() string { - envLevel := os.Getenv(EnvLog) - if envLevel == "" { - return "" - } - - logLevel := "TRACE" - if isValidLogLevel(envLevel) { - // allow following for better ux: info, Info or INFO - logLevel = strings.ToUpper(envLevel) - } else { - log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE.
Valid levels are: %+v", - envLevel, validLevels) - } - - return logLevel -} - -// IsDebugOrHigher returns whether or not the current log level is debug or trace -func IsDebugOrHigher() bool { - level := string(LogLevel()) - return level == "DEBUG" || level == "TRACE" -} - -func isValidLogLevel(level string) bool { - for _, l := range validLevels { - if strings.ToUpper(level) == string(l) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go deleted file mode 100644 index 4477924..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go +++ /dev/null @@ -1,53 +0,0 @@ -package logging - -import ( - "log" - "net/http" - "net/http/httputil" -) - -type transport struct { - name string - transport http.RoundTripper -} - -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - if IsDebugOrHigher() { - reqData, err := httputil.DumpRequestOut(req, true) - if err == nil { - log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData)) - } else { - log.Printf("[ERROR] %s API Request error: %#v", t.name, err) - } - } - - resp, err := t.transport.RoundTrip(req) - if err != nil { - return resp, err - } - - if IsDebugOrHigher() { - respData, err := httputil.DumpResponse(resp, true) - if err == nil { - log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData)) - } else { - log.Printf("[ERROR] %s API Response error: %#v", t.name, err) - } - } - - return resp, nil -} - -func NewTransport(name string, t http.RoundTripper) *transport { - return &transport{name, t} -} - -const logReqMsg = `%s API Request Details: ----[ REQUEST ]--------------------------------------- -%s ------------------------------------------------------` - -const logRespMsg = `%s API Response Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform/helper/mutexkv/mutexkv.go b/vendor/github.com/hashicorp/terraform/helper/mutexkv/mutexkv.go deleted file mode 100644 index 6917f21..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/mutexkv/mutexkv.go +++ /dev/null @@ -1,51 +0,0 @@ -package mutexkv - -import ( - "log" - "sync" -) - -// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to -// serialize changes across arbitrary collaborators that share knowledge of the -// keys they must serialize on. -// -// The initial use case is to let aws_security_group_rule resources serialize -// their access to individual security groups based on SG ID. -type MutexKV struct { - lock sync.Mutex - store map[string]*sync.Mutex -} - -// Locks the mutex for the given key. Caller is responsible for calling Unlock -// for the same key -func (m *MutexKV) Lock(key string) { - log.Printf("[DEBUG] Locking %q", key) - m.get(key).Lock() - log.Printf("[DEBUG] Locked %q", key) -} - -// Unlock the mutex for the given key. 
Caller must have called Lock for the same key first -func (m *MutexKV) Unlock(key string) { - log.Printf("[DEBUG] Unlocking %q", key) - m.get(key).Unlock() - log.Printf("[DEBUG] Unlocked %q", key) -} - -// Returns a mutex for the given key, no guarantee of its lock status -func (m *MutexKV) get(key string) *sync.Mutex { - m.lock.Lock() - defer m.lock.Unlock() - mutex, ok := m.store[key] - if !ok { - mutex = &sync.Mutex{} - m.store[key] = mutex - } - return mutex -} - -// Returns a properly initialized MutexKV -func NewMutexKV() *MutexKV { - return &MutexKV{ - store: make(map[string]*sync.Mutex), - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go deleted file mode 100644 index 7ee2161..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/error.go +++ /dev/null @@ -1,79 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "time" -) - -type NotFoundError struct { - LastError error - LastRequest interface{} - LastResponse interface{} - Message string - Retries int -} - -func (e *NotFoundError) Error() string { - if e.Message != "" { - return e.Message - } - - if e.Retries > 0 { - return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) - } - - return "couldn't find resource" -} - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -type UnexpectedStateError struct { - LastError error - State string - ExpectedState []string -} - -func (e *UnexpectedStateError) Error() string { - return fmt.Sprintf( - "unexpected state '%s', wanted target '%s'. last error: %s", - e.State, - strings.Join(e.ExpectedState, ", "), - e.LastError, - ) -} - -// TimeoutError is returned when WaitForState times out -type TimeoutError struct { - LastError error - LastState string - Timeout time.Duration - ExpectedState []string -} - -func (e *TimeoutError) Error() string { - expectedState := "resource to be gone" - if len(e.ExpectedState) > 0 { - expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) - } - - extraInfo := make([]string, 0) - if e.LastState != "" { - extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) - } - if e.Timeout > 0 { - extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) - } - - suffix := "" - if len(extraInfo) > 0 { - suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) - } - - if e.LastError != nil { - return fmt.Sprintf("timeout while waiting for %s%s: %s", - expectedState, suffix, e.LastError) - } - - return fmt.Sprintf("timeout while waiting for %s%s", - expectedState, suffix) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go deleted file mode 100644 index 1cde67c..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/id.go +++ /dev/null @@ -1,40 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "sync" - "time" -) - -const UniqueIdPrefix = `terraform-` - -// idCounter is a monotonic counter for generating ordered unique ids. -var idMutex sync.Mutex -var idCounter uint32 - -// Helper for a resource to generate a unique identifier w/ default prefix -func UniqueId() string { - return PrefixedUniqueId(UniqueIdPrefix) -} - -// Helper for a resource to generate a unique identifier w/ given prefix -// -//
It is built from a timestamp
-// and an incrementing 8 hex digit value. The timestamp means that multiple IDs
-// created with the same prefix will sort in the order of their creation, even
-// across multiple terraform executions, as long as the clock is not turned back
-// between calls, and as long as any given terraform execution generates fewer
-// than 4 billion IDs.
-func PrefixedUniqueId(prefix string) string {
- // Be precise to 4 digits of fractional seconds, but remove the dot before the
- // fractional seconds.
- timestamp := strings.Replace(
- time.Now().UTC().Format("20060102150405.0000"), ".", "", 1)
-
- idMutex.Lock()
- defer idMutex.Unlock()
- idCounter++
- return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter)
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
deleted file mode 100644
index a465136..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/resource/map.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package resource
-
-import (
- "fmt"
- "sort"
-
- "github.com/hashicorp/terraform/terraform"
-)
-
-// Map is a map of resources that are supported, and provides helpers for
-// more easily implementing a ResourceProvider.
-type Map struct {
- Mapping map[string]Resource
-}
-
-func (m *Map) Validate(
- t string, c *terraform.ResourceConfig) ([]string, []error) {
- r, ok := m.Mapping[t]
- if !ok {
- return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
- }
-
- // If there is no validator set, then it is valid
- if r.ConfigValidator == nil {
- return nil, nil
- }
-
- return r.ConfigValidator.Validate(c)
-}
-
-// Apply performs a create or update depending on the diff, and calls
-// the proper function on the matching Resource.
-func (m *Map) Apply(
- info *terraform.InstanceInfo,
- s *terraform.InstanceState,
- d *terraform.InstanceDiff,
- meta interface{}) (*terraform.InstanceState, error) {
- r, ok := m.Mapping[info.Type]
- if !ok {
- return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
- }
-
- if d.Destroy || d.RequiresNew() {
- if s.ID != "" {
- // Destroy the resource if it is created
- err := r.Destroy(s, meta)
- if err != nil {
- return s, err
- }
-
- s.ID = ""
- }
-
- // If we're only destroying, and not creating, then return now.
- // Otherwise, we continue so that we can create a new resource.
- if !d.RequiresNew() {
- return nil, nil
- }
- }
-
- var result *terraform.InstanceState
- var err error
- if s.ID == "" {
- result, err = r.Create(s, d, meta)
- } else {
- if r.Update == nil {
- return s, fmt.Errorf(
- "Resource type '%s' doesn't support update",
- info.Type)
- }
-
- result, err = r.Update(s, d, meta)
- }
- if result != nil {
- if result.Attributes == nil {
- result.Attributes = make(map[string]string)
- }
-
- result.Attributes["id"] = result.ID
- }
-
- return result, err
-}
-
-// Diff performs a diff on the proper resource type.
-func (m *Map) Diff(
- info *terraform.InstanceInfo,
- s *terraform.InstanceState,
- c *terraform.ResourceConfig,
- meta interface{}) (*terraform.InstanceDiff, error) {
- r, ok := m.Mapping[info.Type]
- if !ok {
- return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
- }
-
- return r.Diff(s, c, meta)
-}
-
-// Refresh performs a Refresh on the proper resource type.
-//
-// Refresh on the Resource won't be called if the state represents a
-// non-created resource (ID is blank).
-//
-// An error is returned if the resource isn't registered.
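As an aside on the id.go helper deleted a few hunks above: a minimal sketch of what PrefixedUniqueId returns. The "web-" prefix and the sample value are illustrative assumptions, not values from this change.

    package example

    import "github.com/hashicorp/terraform/helper/resource"

    // newWidgetID might return e.g. "web-20190612184709123400000001":
    // the prefix, then an 18-digit UTC timestamp (fractional-second dot
    // removed), then an 8-hex-digit process-local counter, i.e. 26
    // characters after the prefix.
    func newWidgetID() string {
        return resource.PrefixedUniqueId("web-")
    }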
-func (m *Map) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the resource isn't created, don't refresh. - if s.ID == "" { - return s, nil - } - - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Refresh(s, meta) -} - -// Resources returns all the resources that are supported by this -// resource map and can be used to satisfy the Resources method of -// a ResourceProvider. -func (m *Map) Resources() []terraform.ResourceType { - ks := make([]string, 0, len(m.Mapping)) - for k, _ := range m.Mapping { - ks = append(ks, k) - } - sort.Strings(ks) - - rs := make([]terraform.ResourceType, 0, len(m.Mapping)) - for _, k := range ks { - rs = append(rs, terraform.ResourceType{ - Name: k, - }) - } - - return rs -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go deleted file mode 100644 index 0d9c831..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go +++ /dev/null @@ -1,49 +0,0 @@ -package resource - -import ( - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/terraform" -) - -type Resource struct { - ConfigValidator *config.Validator - Create CreateFunc - Destroy DestroyFunc - Diff DiffFunc - Refresh RefreshFunc - Update UpdateFunc -} - -// CreateFunc is a function that creates a resource that didn't previously -// exist. -type CreateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) - -// DestroyFunc is a function that destroys a resource that previously -// exists using the state. -type DestroyFunc func( - *terraform.InstanceState, - interface{}) error - -// DiffFunc is a function that performs a diff of a resource. -type DiffFunc func( - *terraform.InstanceState, - *terraform.ResourceConfig, - interface{}) (*terraform.InstanceDiff, error) - -// RefreshFunc is a function that performs a refresh of a specific type -// of resource. -type RefreshFunc func( - *terraform.InstanceState, - interface{}) (*terraform.InstanceState, error) - -// UpdateFunc is a function that is called to update a resource that -// previously existed. The difference between this and CreateFunc is that -// the diff is guaranteed to only contain attributes that don't require -// a new resource. -type UpdateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go deleted file mode 100644 index 37c586a..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ /dev/null @@ -1,259 +0,0 @@ -package resource - -import ( - "log" - "time" -) - -var refreshGracePeriod = 30 * time.Second - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. 
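As a reading aid for the helper being removed here, a minimal sketch of a StateRefreshFunc wired into StateChangeConf; the instance type and the fetchInstance call are assumptions standing in for a real API:

    package example

    import (
        "time"

        "github.com/hashicorp/terraform/helper/resource"
    )

    type instance struct{ Status string }

    // fetchInstance stands in for a real API call; it is an assumption
    // for illustration only.
    func fetchInstance(id string) (*instance, error) {
        return &instance{Status: "running"}, nil
    }

    // waitForRunning blocks until the hypothetical instance reaches
    // the "running" state, or until the timeout elapses.
    func waitForRunning(id string) (interface{}, error) {
        conf := &resource.StateChangeConf{
            Pending: []string{"pending", "starting"},
            Target:  []string{"running"},
            Timeout: 10 * time.Minute,
            Refresh: func() (interface{}, string, error) {
                inst, err := fetchInstance(id)
                if err != nil {
                    return nil, "", err // any error aborts the wait
                }
                if inst == nil {
                    // nil result counts toward NotFoundChecks
                    return nil, "", nil
                }
                return inst, inst.Status, nil
            },
        }
        return conf.WaitForState()
    }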
-type StateRefreshFunc func() (result interface{}, state string, err error)
-
-// StateChangeConf is the configuration struct used for `WaitForState`.
-type StateChangeConf struct {
- Delay time.Duration // Wait this time before starting checks
- Pending []string // States that are "allowed" and will continue trying
- Refresh StateRefreshFunc // Refreshes the current state
- Target []string // Target state
- Timeout time.Duration // The amount of time to wait before timeout
- MinTimeout time.Duration // Smallest time to wait before refreshes
- PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
- NotFoundChecks int // Number of times to allow not found
-
- // This is to work around inconsistent APIs
- ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
-}
-
-// WaitForState watches an object and waits for it to achieve the state
-// specified in the configuration using the specified Refresh() func,
-// waiting the number of seconds specified in the timeout configuration.
-//
-// If the Refresh function returns an error, exit immediately with that error.
-//
-// If the Refresh function returns a state other than the Target state or one
-// listed in Pending, return immediately with an error.
-//
-// If the Timeout is exceeded before reaching the Target state, return an
-// error.
-//
-// Otherwise, return the result of the first call to the Refresh function to
-// reach the target state.
-func (conf *StateChangeConf) WaitForState() (interface{}, error) {
- log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
-
- notfoundTick := 0
- targetOccurence := 0
-
- // Set a default for times to check for not found
- if conf.NotFoundChecks == 0 {
- conf.NotFoundChecks = 20
- }
-
- if conf.ContinuousTargetOccurence == 0 {
- conf.ContinuousTargetOccurence = 1
- }
-
- type Result struct {
- Result interface{}
- State string
- Error error
- Done bool
- }
-
- // Read every result from the refresh loop, waiting for a positive result.Done.
- resCh := make(chan Result, 1)
- // cancellation channel for the refresh loop
- cancelCh := make(chan struct{})
-
- result := Result{}
-
- go func() {
- defer close(resCh)
-
- time.Sleep(conf.Delay)
-
- // start with 0 delay for the first loop
- var wait time.Duration
-
- for {
- // store the last result
- resCh <- result
-
- // wait and watch for cancellation
- select {
- case <-cancelCh:
- return
- case <-time.After(wait):
- // first round had no wait
- if wait == 0 {
- wait = 100 * time.Millisecond
- }
- }
-
- res, currentState, err := conf.Refresh()
- result = Result{
- Result: res,
- State: currentState,
- Error: err,
- }
-
- if err != nil {
- resCh <- result
- return
- }
-
- // If we're waiting for the absence of a thing, then return
- if res == nil && len(conf.Target) == 0 {
- targetOccurence++
- if conf.ContinuousTargetOccurence == targetOccurence {
- result.Done = true
- resCh <- result
- return
- }
- continue
- }
-
- if res == nil {
- // If we didn't find the resource, check if we have been
- // not finding it for a while, and if so, report an error.
- notfoundTick++ - if notfoundTick > conf.NotFoundChecks { - result.Error = &NotFoundError{ - LastError: err, - Retries: notfoundTick, - } - resCh <- result - return - } - } else { - // Reset the counter for when a resource isn't found - notfoundTick = 0 - found := false - - for _, allowed := range conf.Target { - if currentState == allowed { - found = true - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - } - - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - targetOccurence = 0 - break - } - } - - if !found && len(conf.Pending) > 0 { - result.Error = &UnexpectedStateError{ - LastError: err, - State: result.State, - ExpectedState: conf.Target, - } - resCh <- result - return - } - } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } - - // If a poll interval has been specified, choose that interval. - // Otherwise bound the default value. - if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { - wait = conf.PollInterval - } else { - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second - } - } - - log.Printf("[TRACE] Waiting %s before next try", wait) - } - }() - - // store the last value result from the refresh loop - lastResult := Result{} - - timeout := time.After(conf.Timeout) - for { - select { - case r, ok := <-resCh: - // channel closed, so return the last result - if !ok { - return lastResult.Result, lastResult.Error - } - - // we reached the intended state - if r.Done { - return r.Result, r.Error - } - - // still waiting, store the last result - lastResult = r - - case <-timeout: - log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) - log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) - - // cancel the goroutine and start our grace period timer - close(cancelCh) - timeout := time.After(refreshGracePeriod) - - // we need a for loop and a label to break on, because we may have - // an extra response value to read, but still want to wait for the - // channel to close. 
- forSelect:
- for {
- select {
- case r, ok := <-resCh:
- if r.Done {
- // the last refresh loop reached the desired state
- return r.Result, r.Error
- }
-
- if !ok {
- // the goroutine returned
- break forSelect
- }
-
- // target state not reached, save the result for the
- // TimeoutError and wait for the channel to close
- lastResult = r
- case <-timeout:
- log.Println("[ERROR] WaitForState exceeded refresh grace period")
- break forSelect
- }
- }
-
- return nil, &TimeoutError{
- LastError: lastResult.Error,
- LastState: lastResult.State,
- Timeout: conf.Timeout,
- ExpectedState: conf.Target,
- }
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
deleted file mode 100644
index d7de1a0..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
+++ /dev/null
@@ -1,958 +0,0 @@
-package resource
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "reflect"
- "regexp"
- "strings"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/hashicorp/go-getter"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/terraform/config/module"
- "github.com/hashicorp/terraform/helper/logging"
- "github.com/hashicorp/terraform/terraform"
-)
-
-// flagSweep is a flag available when running tests on the command line. It
-// contains a comma separated list of regions for the sweeper functions to
-// run in. This flag bypasses the normal Test path and instead runs functions designed to
-// clean up any leaked resources a testing environment could have created. It is
-// a best effort attempt, and relies on Provider authors to implement "Sweeper"
-// methods for resources.
-
-// Adding Sweeper methods with AddTestSweepers will
-// construct a list of sweeper funcs to be called here. We iterate through
-// regions provided by the sweep flag, and for each region we iterate through the
-// tests, and exit on any errors. At time of writing, sweepers are run
-// sequentially, however they can list dependencies to be run first. We track
-// the sweepers that have been run, so as to not run a sweeper twice for a given
-// region.
-//
-// WARNING:
-// Sweepers are designed to be destructive. You should not use the -sweep flag
-// in any environment that is not strictly a test environment. Resources will be
-// destroyed.
-
-var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers")
-var flagSweepRun = flag.String("sweep-run", "", "Comma separated list of Sweeper Tests to run")
-var sweeperFuncs map[string]*Sweeper
-
-// map of sweepers that have run, and the success/fail status based on any error
-// raised
-var sweeperRunList map[string]bool
-
-// type SweeperFunc is a signature for a function that acts as a sweeper. It
-// accepts a string for the region that the sweeper is to run in. This
-// function must be able to construct a valid client for that region.
-type SweeperFunc func(r string) error
-
-type Sweeper struct {
- // Name for sweeper. Must be unique to be run by the Sweeper Runner
- Name string
-
- // Dependencies list the const names of other Sweeper functions that must be run
- // prior to running this Sweeper. This is an ordered list that will be invoked
- // recursively at the helper/resource level
- Dependencies []string
-
- // Sweeper function that when invoked sweeps the Provider of specific
- // resources
- F SweeperFunc
-}
-
-func init() {
- sweeperFuncs = make(map[string]*Sweeper)
-}
-
-// AddTestSweepers function adds a given name and Sweeper configuration
-// pair to the internal sweeperFuncs map. Invoke this function to register a
-// resource sweeper to be available for running when the -sweep flag is used
-// with `go test`. Sweeper names must be unique to help ensure a given sweeper
-// is only run once per run.
-func AddTestSweepers(name string, s *Sweeper) {
- if _, ok := sweeperFuncs[name]; ok {
- log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name)
- }
-
- sweeperFuncs[name] = s
-}
-
-func TestMain(m *testing.M) {
- flag.Parse()
- if *flagSweep != "" {
- // parse flagSweep contents for regions to run
- regions := strings.Split(*flagSweep, ",")
-
- // get filtered list of sweepers to run based on sweep-run flag
- sweepers := filterSweepers(*flagSweepRun, sweeperFuncs)
- for _, region := range regions {
- region = strings.TrimSpace(region)
- // reset sweeperRunList for each region
- sweeperRunList = map[string]bool{}
-
- log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region)
- for _, sweeper := range sweepers {
- if err := runSweeperWithRegion(region, sweeper); err != nil {
- log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err)
- }
- }
-
- log.Printf("Sweeper Tests ran:\n")
- for s, _ := range sweeperRunList {
- fmt.Printf("\t- %s\n", s)
- }
- }
- } else {
- os.Exit(m.Run())
- }
-}
-
-// filterSweepers takes a comma separated string listing the names of sweepers
-// to be run, and returns a filtered set from the list of all sweepers to
-// run based on the names given.
-func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper {
- filterSlice := strings.Split(strings.ToLower(f), ",")
- if len(filterSlice) == 1 && filterSlice[0] == "" {
- // if the filter slice is a single element of "" then no sweeper list was
- // given, so just return the full list
- return source
- }
-
- sweepers := make(map[string]*Sweeper)
- for name, sweeper := range source {
- for _, s := range filterSlice {
- if strings.Contains(strings.ToLower(name), s) {
- sweepers[name] = sweeper
- }
- }
- }
- return sweepers
-}
-
-// runSweeperWithRegion receives a sweeper and a region, and recursively calls
-// itself with that region for every dependency found for that sweeper. If there
-// are no dependencies, invoke the contained sweeper func with the region, and
-// add the success/fail status to the sweeperRunList.
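For context on how these hooks were consumed, a minimal sketch of registering a sweeper with a dependency; every name and the no-op body are assumptions:

    package example

    import "github.com/hashicorp/terraform/helper/resource"

    func init() {
        resource.AddTestSweepers("example_widget", &resource.Sweeper{
            Name: "example_widget",
            // Attachments must be swept before the widgets they reference.
            Dependencies: []string{"example_widget_attachment"},
            F: func(region string) error {
                // A real sweeper would build an API client for region,
                // list test-created widgets, and delete them.
                return nil
            },
        })
    }

With TestMain wired up as above, this sweeper would run via something like `go test ./example -sweep=us-west-2`.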
-func runSweeperWithRegion(region string, s *Sweeper) error { - for _, dep := range s.Dependencies { - if depSweeper, ok := sweeperFuncs[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - if err := runSweeperWithRegion(region, depSweeper); err != nil { - return err - } - } else { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) - } - } - - if _, ok := sweeperRunList[s.Name]; ok { - log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) - return nil - } - - runE := s.F(region) - if runE == nil { - sweeperRunList[s.Name] = true - } else { - sweeperRunList[s.Name] = false - } - - return runE -} - -const TestEnvVar = "TF_ACC" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error -} - -// TestCheckFunc is the callback type used with acceptance tests to check -// the state of a resource. The state passed in is the latest state known, -// or in the case of being after a destroy, it is the last known state when -// it was created. -type TestCheckFunc func(*terraform.State) error - -// ImportStateCheckFunc is the check function for ImportState tests -type ImportStateCheckFunc func([]*terraform.InstanceState) error - -// TestCase is a single acceptance test case used to test the apply/destroy -// lifecycle of a resource in a specific configuration. -// -// When the destroy plan is executed, the config from the last TestStep -// is used to plan it. -type TestCase struct { - // IsUnitTest allows a test to run regardless of the TF_ACC - // environment variable. This should be used with care - only for - // fast tests on local resources (e.g. remote state with a local - // backend) but can be used to increase confidence in correct - // operation of Terraform without waiting for a full acctest run. - IsUnitTest bool - - // PreCheck, if non-nil, will be called before any test steps are - // executed. It will only be executed in the case that the steps - // would run, so it can be used for some validation before running - // acceptance tests, such as verifying that keys are setup. - PreCheck func() - - // Providers is the ResourceProvider that will be under test. - // - // Alternately, ProviderFactories can be specified for the providers - // that are valid. This takes priority over Providers. - // - // The end effect of each is the same: specifying the providers that - // are used within the tests. - Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // CheckDestroy is called after the resource is finally destroyed - // to allow the tester to test that the resource is truly gone. - CheckDestroy TestCheckFunc - - // Steps are the apply sequences done within the context of the - // same state. Each step can have its own check to verify correctness. - Steps []TestStep - - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. 
This will - // default to the first non-nil primary resource in the state. - // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string - IDRefreshIgnore []string -} - -// TestStep is a single apply sequence of a test, done within the -// context of a state. -// -// Multiple TestSteps can be sequenced in a Test to allow testing -// potentially complex update logic. In general, simply create/destroy -// tests will only need one step. -type TestStep struct { - // ResourceName should be set to the name of the resource - // that is being tested. Example: "aws_instance.foo". Various test - // modes use this to auto-detect state information. - // - // This is only required if the test mode settings below say it is - // for the mode you're using. - ResourceName string - - // PreConfig is called before the Config is applied to perform any per-step - // setup that needs to happen. This is called regardless of "test mode" - // below. - PreConfig func() - - //--------------------------------------------------------------- - // Test modes. One of the following groups of settings must be - // set to determine what the test step will do. Ideally we would've - // used Go interfaces here but there are now hundreds of tests we don't - // want to re-type so instead we just determine which step logic - // to run based on what settings below are set. - //--------------------------------------------------------------- - - //--------------------------------------------------------------- - // Plan, Apply testing - //--------------------------------------------------------------- - - // Config a string of the configuration to give to Terraform. If this - // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. - Config string - - // Check is called after the Config is applied. Use this step to - // make your own API calls to check the status of things, and to - // inspect the format of the ResourceState itself. - // - // If an error is returned, the test will fail. In this case, a - // destroy plan will still be attempted. - // - // If this is nil, no check is done on this step. - Check TestCheckFunc - - // Destroy will create a destroy plan if set to true. - Destroy bool - - // ExpectNonEmptyPlan can be set to true for specific types of tests that are - // looking to verify that a diff occurs - ExpectNonEmptyPlan bool - - // ExpectError allows the construction of test cases that we expect to fail - // with an error. The specified regexp must match against the error for the - // test to pass. - ExpectError *regexp.Regexp - - // PlanOnly can be set to only run `plan` with this configuration, and not - // actually apply it. This is useful for ensuring config changes result in - // no-op plans - PlanOnly bool - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - //--------------------------------------------------------------- - // ImportState testing - //--------------------------------------------------------------- - - // ImportState, if true, will test the functionality of ImportState - // by importing the resource with ResourceName (must be set) and the - // ID of that resource. - ImportState bool - - // ImportStateId is the ID to perform an ImportState operation with. - // This is optional. If it isn't set, then the resource ID is automatically - // determined by inspecting the state for ResourceName's ID. 
- ImportStateId string - - // ImportStateIdPrefix is the prefix added in front of ImportStateId. - // This can be useful in complex import cases, where more than one - // attribute needs to be passed on as the Import ID. Mainly in cases - // where the ID is not known, and a known prefix needs to be added to - // the unset ImportStateId field. - ImportStateIdPrefix string - - // ImportStateCheck checks the results of ImportState. It should be - // used to verify that the resulting value of ImportState has the - // proper resources, IDs, and attributes. - ImportStateCheck ImportStateCheckFunc - - // ImportStateVerify, if true, will also check that the state values - // that are finally put into the state after import match for all the - // IDs returned by the Import. - // - // ImportStateVerifyIgnore are fields that should not be verified to - // be equal. These can be set to ephemeral fields or fields that can't - // be refreshed and don't matter. - ImportStateVerify bool - ImportStateVerifyIgnore []string -} - -// Test performs an acceptance test on a resource. -// -// Tests are not run unless an environmental variable "TF_ACC" is -// set to some non-empty value. This is to avoid test cases surprising -// a user by creating real resources. -// -// Tests will fail unless the verbose flag (`go test -v`, or explicitly -// the "-test.v" flag) is set. Because some acceptance tests take quite -// long, we require the verbose flag so users are able to see progress -// output. -func Test(t TestT, c TestCase) { - // We only run acceptance tests if an env var is set because they're - // slow and generally require some outside configuration. You can opt out - // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) - return - } - - logWriter, err := logging.LogOutput() - if err != nil { - t.Error(fmt.Errorf("error setting up logging: %s", err)) - } - log.SetOutput(logWriter) - - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - return - } - - // Run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - - providerResolver, err := testProviderResolver(c) - if err != nil { - t.Fatal(err) - } - opts := terraform.ContextOpts{ProviderResolver: providerResolver} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - var err error - log.Printf("[WARN] Test: Executing step %d", i) - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config = testProviderConfig(c) - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? 
Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf( - "Step %d error: %s", i, err)) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } - } - } - - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } - } - - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") - } -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) string { - var lines []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - - return strings.Join(lines, "") -} - -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. 
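Pulling the pieces of Test and TestCase together, a minimal sketch of an acceptance test; the provider stub and the example_widget resource are assumptions, not part of this change:

    package example

    import (
        "testing"

        "github.com/hashicorp/terraform/helper/resource"
        "github.com/hashicorp/terraform/terraform"
    )

    // Provider is a stub so the sketch compiles; a real provider
    // constructor would be used here.
    func Provider() terraform.ResourceProvider { return nil }

    func TestAccExampleWidget_basic(t *testing.T) {
        resource.Test(t, resource.TestCase{
            // Skipped unless TF_ACC is set, per the Test docs above.
            PreCheck: func() { /* verify required credentials here */ },
            Providers: map[string]terraform.ResourceProvider{
                "example": Provider(),
            },
            Steps: []resource.TestStep{
                {
                    Config: `resource "example_widget" "foo" { name = "foo" }`,
                    Check: resource.TestCheckResourceAttr(
                        "example_widget.foo", "name", "foo"),
                },
            },
        })
    }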
-func testProviderResolver(c TestCase) (terraform.ResourceProviderResolver, error) { - ctxProviders := c.ProviderFactories - if ctxProviders == nil { - ctxProviders = make(map[string]terraform.ResourceProviderFactory) - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - - // reset the providers if needed - for k, pf := range ctxProviders { - // we can ignore any errors here, if we don't have a provider to reset - // the error will be handled later - p, err := pf() - if err != nil { - return nil, err - } - if p, ok := p.(TestProvider); ok { - err := p.TestReset() - if err != nil { - return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err) - } - } - } - - return terraform.ResourceProviderResolverFixed(ctxProviders), nil -} - -// UnitTest is a helper to force the acceptance testing harness to run in the -// normal unit test suite. This should only be used for resource that don't -// have any external dependencies. -func UnitTest(t TestT, c TestCase) { - c.IsUnitTest = true - Test(t, c) -} - -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - name := fmt.Sprintf("%s.foo", r.Type) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := terraform.NewState() - state.RootModule().Resources[name] = &terraform.ResourceState{ - Type: r.Type, - Primary: &terraform.InstanceState{ - ID: r.Primary.ID, - }, - } - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - mod, err := testModule(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Module = mod - opts.State = state - ctx, err := terraform.NewContext(&opts) - if err != nil { - return err - } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) - } - - log.Printf("[WARN] Config warnings: %#v", ws) - } - - // Refresh! - state, err = ctx.Refresh() - if err != nil { - return fmt.Errorf("Error refreshing: %s", err) - } - - // Verify attribute equivalence. 
- actualR := state.RootModule().Resources[name] - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Primary == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Primary.Attributes - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k, _ := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k, _ := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testModule( - opts terraform.ContextOpts, - step TestStep) (*module.Tree, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf( - "Error creating temporary directory for config: %s", err) - } - defer os.RemoveAll(cfgPath) - - // Write the configuration - cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) - if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) - } - - _, err = io.Copy(cfgF, strings.NewReader(step.Config)) - cfgF.Close() - if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) - } - - // Parse the configuration - mod, err := module.NewTreeModule("", cfgPath) - if err != nil { - return nil, fmt.Errorf( - "Error loading configuration: %s", err) - } - - // Load the modules - modStorage := &getter.FolderStorage{ - StorageDir: filepath.Join(cfgPath, ".tfmodules"), - } - err = mod.Load(modStorage, module.GetModeGet) - if err != nil { - return nil, fmt.Errorf("Error downloading modules: %s", err) - } - - return mod, nil -} - -func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.ResourceName]; ok { - return v, nil - } - } - } - - return nil, fmt.Errorf( - "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) -} - -// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - for i, f := range fs { - if err := f(s); err != nil { - return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) - } - } - - return nil - } -} - -// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -// -// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the -// TestCheckFuncs and aggregates failures. 
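A short sketch of the aggregate variant in use, built on the check helpers defined just below; the resource address and attribute names are assumptions:

    package example

    import "github.com/hashicorp/terraform/helper/resource"

    // checkWidget runs every check even if an earlier one fails; the
    // failures come back aggregated in a single error via go-multierror.
    var checkWidget = resource.ComposeAggregateTestCheckFunc(
        resource.TestCheckResourceAttr("example_widget.foo", "name", "foo"),
        resource.TestCheckResourceAttrSet("example_widget.foo", "id"),
        resource.TestCheckNoResourceAttr("example_widget.foo", "deleted_at"),
    )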
-func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - var result *multierror.Error - - for i, f := range fs { - if err := f(s); err != nil { - result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) - } - } - - return result.ErrorOrNil() - } -} - -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. -func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - if val, ok := is.Attributes[key]; ok && val != "" { - return nil - } - - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) - } -} - -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. -func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - if v, ok := is.Attributes[key]; !ok || v != value { - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } - - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) - } - - return nil - } -} - -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. -func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - if _, ok := is.Attributes[key]; ok { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) - } - - return nil - } -} - -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. -func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) - } - - return nil - } -} - -// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the -// value is a pointer so that it can be updated while the test is running. -// It will only be dereferenced at the point this step is run. -func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckResourceAttr(name, key, *value)(s) - } -} - -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. 
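When the stock helpers don't fit, a TestCheckFunc can be written by hand against terraform.State. A sketch, assuming a flatmap-encoded "tags" list on a hypothetical resource:

    package example

    import (
        "fmt"
        "strings"

        "github.com/hashicorp/terraform/helper/resource"
        "github.com/hashicorp/terraform/terraform"
    )

    // testCheckWidgetHasTag scans the flattened "tags" list for a value,
    // something the fixed-value helpers cannot express directly.
    func testCheckWidgetHasTag(name, tag string) resource.TestCheckFunc {
        return func(s *terraform.State) error {
            rs, ok := s.RootModule().Resources[name]
            if !ok || rs.Primary == nil {
                return fmt.Errorf("not found: %s", name)
            }
            for k, v := range rs.Primary.Attributes {
                if k == "tags.#" {
                    continue // the element count, not an element
                }
                if strings.HasPrefix(k, "tags.") && v == tag {
                    return nil
                }
            }
            return fmt.Errorf("%s: tag %q not found", name, tag)
        }
    }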
-func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - vFirst, ok := isFirst.Attributes[keyFirst] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) - } - - isSecond, err := primaryInstanceState(s, nameSecond) - if err != nil { - return err - } - vSecond, ok := isSecond.Attributes[keySecond] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) - } - - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) - } - - return nil - } -} - -// TestCheckOutput checks an output in the Terraform configuration -func TestCheckOutput(name, value string) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Value != value { - return fmt.Errorf( - "Output '%s': expected %#v, got %#v", - name, - value, - rs) - } - - return nil - } -} - -func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if !r.MatchString(rs.Value.(string)) { - return fmt.Errorf( - "Output '%s': %#v didn't match %q", - name, - rs, - r.String()) - } - - return nil - } -} - -// TestT is the interface used to handle the test lifecycle of a test. -// -// Users should just use a *testing.T object, which implements this. -type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - -// primaryInstanceState returns the primary instance state for the given resource name. 
-func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() - rs, ok := ms.Resources[name] - if !ok { - return nil, fmt.Errorf("Not found: %s", name) - } - - is := rs.Primary - if is == nil { - return nil, fmt.Errorf("No primary instance: %s", name) - } - - return is, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go deleted file mode 100644 index 537a11c..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ /dev/null @@ -1,160 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - mod, err := testModule(opts, step) - if err != nil { - return state, err - } - - // Build the context - opts.Module = mod - opts.State = state - opts.Destroy = step.Destroy - ctx, err := terraform.NewContext(&opts) - if err != nil { - return state, fmt.Errorf("Error initializing context: %s", err) - } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return state, fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) - } - log.Printf("[WARN] Config warnings: %#v", ws) - } - - // Refresh! - state, err = ctx.Refresh() - if err != nil { - return state, fmt.Errorf( - "Error refreshing: %s", err) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - if p, err := ctx.Plan(); err != nil { - return state, fmt.Errorf( - "Error planning: %s", err) - } else { - log.Printf("[WARN] Test: Step plan: %s", p) - } - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behaviour in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply! - state, err = ctx.Apply() - if err != nil { - return state, fmt.Errorf("Error applying: %s", err) - } - - // Check! Excitement! - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - var p *terraform.Plan - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on follow-up plan: %s", err) - } - if p.Diff != nil && !p.Diff.Empty() { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", p) - } - } - - // And another after a Refresh. 
- if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - state, err = ctx.Refresh() - if err != nil { - return state, fmt.Errorf( - "Error on follow-up refresh: %s", err) - } - } - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on second follow-up plan: %s", err) - } - empty := p.Diff == nil || p.Diff.Empty() - - // Data resources are tricky because they legitimately get instantiated - // during refresh so that they will be already populated during the - // plan walk. Because of this, if we have any data resources in the - // config we'll end up wanting to destroy them again here. This is - // acceptable and expected, and we'll treat it as "empty" for the - // sake of this testing. - if step.Destroy { - empty = true - - for _, moduleDiff := range p.Diff.Modules { - for k, instanceDiff := range moduleDiff.Resources { - if !strings.HasPrefix(k, "data.") { - empty = false - break - } - - if !instanceDiff.Destroy { - empty = false - } - } - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", p) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go deleted file mode 100644 index 28ad105..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ /dev/null @@ -1,141 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/terraform" -) - -// testStepImportState runs an imort state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - // Determine the ID to import - importId := step.ImportStateId - if importId == "" { - resource, err := testResource(step, state) - if err != nil { - return state, err - } - - importId = resource.Primary.ID - } - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - mod, err := testModule(opts, step) - if err != nil { - return state, err - } - - opts.Module = mod - opts.State = terraform.NewState() - ctx, err := terraform.NewContext(&opts) - if err != nil { - return state, err - } - - // Do the import! 
- newState, err := ctx.Import(&terraform.ImportOpts{ - // Set the module so that any provider config is loaded - Module: mod, - - Targets: []*terraform.ImportTarget{ - &terraform.ImportTarget{ - Addr: step.ResourceName, - ID: importId, - }, - }, - }) - if err != nil { - log.Printf("[ERROR] Test: ImportState failure: %s", err) - return state, err - } - - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - states = append(states, r.Primary) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - actual[k] = v - } - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k, _ := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k, _ := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go deleted file mode 100644 index ca50e29..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package resource - -import ( - "sync" - "time" -) - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. 
- var resultErr error - var resultErrMu sync.Mutex - - c := &StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() - - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr -} - -// RetryFunc is the function retried until it succeeds. -type RetryFunc func() *RetryError - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -type RetryError struct { - Err error - Retryable bool -} - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. -func RetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: true} -} - -// NonRetryableError is a helper to create a RetryError that's _not)_ retryable -// from a given error. -func NonRetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: false} -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md deleted file mode 100644 index 28c8362..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Terraform Helper Lib: schema - -The `schema` package provides a high-level interface for writing resource -providers for Terraform. - -If you're writing a resource provider, we recommend you use this package. - -The interface exposed by this package is much friendlier than trying to -write to the Terraform API directly. The core Terraform API is low-level -and built for maximum flexibility and control, whereas this library is built -as a framework around that to more easily write common providers. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go deleted file mode 100644 index a0729c0..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go +++ /dev/null @@ -1,94 +0,0 @@ -package schema - -import ( - "context" - - "github.com/hashicorp/terraform/terraform" -) - -// Backend represents a partial backend.Backend implementation and simplifies -// the creation of configuration loading and validation. -// -// Unlike other schema structs such as Provider, this struct is meant to be -// embedded within your actual implementation. It provides implementations -// only for Input and Configure and gives you a method for accessing the -// configuration in the form of a ResourceData that you're expected to call -// from the other implementation funcs. -type Backend struct { - // Schema is the schema for the configuration of this backend. 
If this - // Backend has no configuration this can be omitted. - Schema map[string]*Schema - - // ConfigureFunc is called to configure the backend. Use the - // FromContext* methods to extract information from the context. - // This can be nil, in which case nothing will be called but the - // config will still be stored. - ConfigureFunc func(context.Context) error - - config *ResourceData -} - -var ( - backendConfigKey = contextKey("backend config") -) - -// FromContextBackendConfig extracts a ResourceData with the configuration -// from the context. This should only be called by Backend functions. -func FromContextBackendConfig(ctx context.Context) *ResourceData { - return ctx.Value(backendConfigKey).(*ResourceData) -} - -func (b *Backend) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - if b == nil { - return c, nil - } - - return schemaMap(b.Schema).Input(input, c) -} - -func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if b == nil { - return nil, nil - } - - return schemaMap(b.Schema).Validate(c) -} - -func (b *Backend) Configure(c *terraform.ResourceConfig) error { - if b == nil { - return nil - } - - sm := schemaMap(b.Schema) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) - if err != nil { - return err - } - - data, err := sm.Data(nil, diff) - if err != nil { - return err - } - b.config = data - - if b.ConfigureFunc != nil { - err = b.ConfigureFunc(context.WithValue( - context.Background(), backendConfigKey, data)) - if err != nil { - return err - } - } - - return nil -} - -// Config returns the configuration. This is available after Configure is -// called. -func (b *Backend) Config() *ResourceData { - return b.config -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go deleted file mode 100644 index 5a03d2d..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go +++ /dev/null @@ -1,59 +0,0 @@ -package schema - -import ( - "fmt" -) - -// DataSourceResourceShim takes a Resource instance describing a data source -// (with a Read implementation and a Schema, at least) and returns a new -// Resource instance with additional Create and Delete implementations that -// allow the data source to be used as a resource. -// -// This is a backward-compatibility layer for data sources that were formerly -// read-only resources before the data source concept was added. It should not -// be used for any *new* data sources. -// -// The Read function for the data source *must* call d.SetId with a non-empty -// id in order for this shim to function as expected. -// -// The provided Resource instance, and its schema, will be modified in-place -// to make it suitable for use as a full resource. -func DataSourceResourceShim(name string, dataSource *Resource) *Resource { - // Recursively, in-place adjust the schema so that it has ForceNew - // on any user-settable resource. 
- dataSourceResourceShimAdjustSchema(dataSource.Schema) - - dataSource.Create = CreateFunc(dataSource.Read) - dataSource.Delete = func(d *ResourceData, meta interface{}) error { - d.SetId("") - return nil - } - dataSource.Update = nil // should already be nil, but let's make sure - - // FIXME: Link to some further docs either on the website or in the - // changelog, once such a thing exists. - dataSource.deprecationMessage = fmt.Sprintf( - "using %s as a resource is deprecated; consider using the data source instead", - name, - ) - - return dataSource -} - -func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { - for _, s := range schema { - // If the attribute is configurable then it must be ForceNew, - // since we have no Update implementation. - if s.Required || s.Optional { - s.ForceNew = true - } - - // If the attribute is a nested resource, we need to recursively - // apply these same adjustments to it. - if s.Elem != nil { - if r, ok := s.Elem.(*Resource); ok { - dataSourceResourceShimAdjustSchema(r.Schema) - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go deleted file mode 100644 index d5e20e0..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go +++ /dev/null @@ -1,6 +0,0 @@ -package schema - -// Equal is an interface that checks for deep equality between two objects. -type Equal interface { - Equal(interface{}) bool -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go deleted file mode 100644 index 1660a67..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go +++ /dev/null @@ -1,334 +0,0 @@ -package schema - -import ( - "fmt" - "strconv" -) - -// FieldReaders are responsible for decoding fields out of data into -// the proper typed representation. ResourceData uses this to query data -// out of multiple sources: config, state, diffs, etc. -type FieldReader interface { - ReadField([]string) (FieldReadResult, error) -} - -// FieldReadResult encapsulates all the resulting data from reading -// a field. -type FieldReadResult struct { - // Value is the actual read value. NegValue is the _negative_ value - // or the items that should be removed (if they existed). NegValue - // doesn't make sense for primitives but is important for any - // container types such as maps, sets, lists. - Value interface{} - ValueProcessed interface{} - - // Exists is true if the field was found in the data. False means - // it wasn't found if there was no error. - Exists bool - - // Computed is true if the field was found but the value - // is computed. - Computed bool -} - -// ValueOrZero returns the value of this result or the zero value of the -// schema type, ensuring a consistent non-nil return value. -func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { - if r.Value != nil { - return r.Value - } - - return s.ZeroValue() -} - -// addrToSchema finds the final element schema for the given address -// and the given schema. It returns all the schemas that led to the final -// schema. These are in order of the address (out to in). -func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { - current := &Schema{ - Type: typeObject, - Elem: schemaMap, - } - - // If we aren't given an address, then the user is requesting the - // full object, so we return the special value which is the full object. 
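As a concrete reading of an address (hypothetical field name): for a schema map m where "ports" is a TypeList whose Elem is a TypeInt schema, the walk below yields the list schema followed by the element schema, while an empty address returns only the synthetic typeObject wrapper built above:

schemas := addrToSchema([]string{"ports", "0"}, m) // []*Schema{list schema, TypeInt element schema}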
- if len(addr) == 0 { - return []*Schema{current} - } - - result := make([]*Schema, 0, len(addr)) - for len(addr) > 0 { - k := addr[0] - addr = addr[1:] - - REPEAT: - // We want to trim off the first "typeObject" since its not a - // real lookup that people do. i.e. []string{"foo"} in a structure - // isn't {typeObject, typeString}, its just a {typeString}. - if len(result) > 0 || current.Type != typeObject { - result = append(result, current) - } - - switch t := current.Type; t { - case TypeBool, TypeInt, TypeFloat, TypeString: - if len(addr) > 0 { - return nil - } - case TypeList, TypeSet: - isIndex := len(addr) > 0 && addr[0] == "#" - - switch v := current.Elem.(type) { - case *Resource: - current = &Schema{ - Type: typeObject, - Elem: v.Schema, - } - case *Schema: - current = v - case ValueType: - current = &Schema{Type: v} - default: - // we may not know the Elem type and are just looking for the - // index - if isIndex { - break - } - - if len(addr) == 0 { - // we've processed the address, so return what we've - // collected - return result - } - - if len(addr) == 1 { - if _, err := strconv.Atoi(addr[0]); err == nil { - // we're indexing a value without a schema. This can - // happen if the list is nested in another schema type. - // Default to a TypeString like we do with a map - current = &Schema{Type: TypeString} - break - } - } - - return nil - } - - // If we only have one more thing and the next thing - // is a #, then we're accessing the index which is always - // an int. - if isIndex { - current = &Schema{Type: TypeInt} - break - } - - case TypeMap: - if len(addr) > 0 { - switch v := current.Elem.(type) { - case ValueType: - current = &Schema{Type: v} - default: - // maps default to string values. This is all we can have - // if this is nested in another list or map. - current = &Schema{Type: TypeString} - } - } - case typeObject: - // If we're already in the object, then we want to handle Sets - // and Lists specially. Basically, their next key is the lookup - // key (the set value or the list element). For these scenarios, - // we just want to skip it and move to the next element if there - // is one. - if len(result) > 0 { - lastType := result[len(result)-2].Type - if lastType == TypeSet || lastType == TypeList { - if len(addr) == 0 { - break - } - - k = addr[0] - addr = addr[1:] - } - } - - m := current.Elem.(map[string]*Schema) - val, ok := m[k] - if !ok { - return nil - } - - current = val - goto REPEAT - } - } - - return result -} - -// readListField is a generic method for reading a list field out of a -// a FieldReader. It does this based on the assumption that there is a key -// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. -// after that point. 
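Concretely, a two-element list "ports" (hypothetical name) is expected to be stored flattened as "ports.#" = "2" with elements under "ports.0" and "ports.1"; the implementation below fetches the "#" count key first and then each index in turn:

res, err := readListField(r, []string{"ports"}, portsSchema) // reads ports.#, then ports.0 and ports.1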
-func readListField( - r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { - addrPadded := make([]string, len(addr)+1) - copy(addrPadded, addr) - addrPadded[len(addrPadded)-1] = "#" - - // Get the number of elements in the list - countResult, err := r.ReadField(addrPadded) - if err != nil { - return FieldReadResult{}, err - } - if !countResult.Exists { - // No count, means we have no list - countResult.Value = 0 - } - - // If we have an empty list, then return an empty list - if countResult.Computed || countResult.Value.(int) == 0 { - return FieldReadResult{ - Value: []interface{}{}, - Exists: countResult.Exists, - Computed: countResult.Computed, - }, nil - } - - // Go through each count, and get the item value out of it - result := make([]interface{}, countResult.Value.(int)) - for i, _ := range result { - is := strconv.FormatInt(int64(i), 10) - addrPadded[len(addrPadded)-1] = is - rawResult, err := r.ReadField(addrPadded) - if err != nil { - return FieldReadResult{}, err - } - if !rawResult.Exists { - // This should never happen, because by the time the data - // gets to the FieldReaders, all the defaults should be set by - // Schema. - rawResult.Value = nil - } - - result[i] = rawResult.Value - } - - return FieldReadResult{ - Value: result, - Exists: true, - }, nil -} - -// readObjectField is a generic method for reading objects out of FieldReaders -// based on the assumption that building an address of []string{k, FIELD} -// will result in the proper field data. -func readObjectField( - r FieldReader, - addr []string, - schema map[string]*Schema) (FieldReadResult, error) { - result := make(map[string]interface{}) - exists := false - for field, s := range schema { - addrRead := make([]string, len(addr), len(addr)+1) - copy(addrRead, addr) - addrRead = append(addrRead, field) - rawResult, err := r.ReadField(addrRead) - if err != nil { - return FieldReadResult{}, err - } - if rawResult.Exists { - exists = true - } - - result[field] = rawResult.ValueOrZero(s) - } - - return FieldReadResult{ - Value: result, - Exists: exists, - }, nil -} - -// convert map values to the proper primitive type based on schema.Elem -func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error { - - elemType := TypeString - if et, ok := schema.Elem.(ValueType); ok { - elemType = et - } - - switch elemType { - case TypeInt, TypeFloat, TypeBool: - for k, v := range m { - vs, ok := v.(string) - if !ok { - continue - } - - v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) - if err != nil { - return err - } - - m[k] = v - } - } - return nil -} - -func stringToPrimitive( - value string, computed bool, schema *Schema) (interface{}, error) { - var returnVal interface{} - switch schema.Type { - case TypeBool: - if value == "" { - returnVal = false - break - } - if computed { - break - } - - v, err := strconv.ParseBool(value) - if err != nil { - return nil, err - } - - returnVal = v - case TypeFloat: - if value == "" { - returnVal = 0.0 - break - } - if computed { - break - } - - v, err := strconv.ParseFloat(value, 64) - if err != nil { - return nil, err - } - - returnVal = v - case TypeInt: - if value == "" { - returnVal = 0 - break - } - if computed { - break - } - - v, err := strconv.ParseInt(value, 0, 0) - if err != nil { - return nil, err - } - - returnVal = int(v) - case TypeString: - returnVal = value - default: - panic(fmt.Sprintf("Unknown type: %s", schema.Type)) - } - - return returnVal, nil -} diff --git 
a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go deleted file mode 100644 index f958bbc..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go +++ /dev/null @@ -1,333 +0,0 @@ -package schema - -import ( - "fmt" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" -) - -// ConfigFieldReader reads fields out of an untyped map[string]string to the -// best of its ability. It also applies defaults from the Schema. (The other -// field readers do not need default handling because they source fully -// populated data structures.) -type ConfigFieldReader struct { - Config *terraform.ResourceConfig - Schema map[string]*Schema - - indexMaps map[string]map[string]int - once sync.Once -} - -func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { - r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) - return r.readField(address, false) -} - -func (r *ConfigFieldReader) readField( - address []string, nested bool) (FieldReadResult, error) { - schemaList := addrToSchema(address, r.Schema) - if len(schemaList) == 0 { - return FieldReadResult{}, nil - } - - if !nested { - // If we have a set anywhere in the address, then we need to - // read that set out in order and actually replace that part of - // the address with the real list index. i.e. set.50 might actually - // map to set.12 in the config, since it is in list order in the - // config, not indexed by set value. - for i, v := range schemaList { - // Sets are the only thing that cause this issue. - if v.Type != TypeSet { - continue - } - - // If we're at the end of the list, then we don't have to worry - // about this because we're just requesting the whole set. - if i == len(schemaList)-1 { - continue - } - - // If we're looking for the count, then ignore... - if address[i+1] == "#" { - continue - } - - indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] - if !ok { - // Get the set so we can get the index map that tells us the - // mapping of the hash code to the list index - _, err := r.readSet(address[:i+1], v) - if err != nil { - return FieldReadResult{}, err - } - indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] - } - - index, ok := indexMap[address[i+1]] - if !ok { - return FieldReadResult{}, nil - } - - address[i+1] = strconv.FormatInt(int64(index), 10) - } - } - - k := strings.Join(address, ".") - schema := schemaList[len(schemaList)-1] - - // If we're getting the single element of a promoted list, then - // check to see if we have a single element we need to promote. - if address[len(address)-1] == "0" && len(schemaList) > 1 { - lastSchema := schemaList[len(schemaList)-2] - if lastSchema.Type == TypeList && lastSchema.PromoteSingle { - k := strings.Join(address[:len(address)-1], ".") - result, err := r.readPrimitive(k, schema) - if err == nil { - return result, nil - } - } - } - - switch schema.Type { - case TypeBool, TypeFloat, TypeInt, TypeString: - return r.readPrimitive(k, schema) - case TypeList: - // If we support promotion then we first check if we have a lone - // value that we must promote. - // a value that is alone. 
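In other words, promotion lets a configuration supply a single bare value where a list is expected; when the lone primitive below reads successfully, it is wrapped into a one-element list (for example, a value of "80" becomes []interface{}{"80"}).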
- if schema.PromoteSingle { - result, err := r.readPrimitive(k, schema.Elem.(*Schema)) - if err == nil && result.Exists { - result.Value = []interface{}{result.Value} - return result, nil - } - } - - return readListField(&nestedConfigFieldReader{r}, address, schema) - case TypeMap: - return r.readMap(k, schema) - case TypeSet: - return r.readSet(address, schema) - case typeObject: - return readObjectField( - &nestedConfigFieldReader{r}, - address, schema.Elem.(map[string]*Schema)) - default: - panic(fmt.Sprintf("Unknown type: %s", schema.Type)) - } -} - -func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { - // We want both the raw value and the interpolated. We use the interpolated - // to store actual values and we use the raw one to check for - // computed keys. Actual values are obtained in the switch, depending on - // the type of the raw value. - mraw, ok := r.Config.GetRaw(k) - if !ok { - // check if this is from an interpolated field by seeing if it exists - // in the config - _, ok := r.Config.Get(k) - if !ok { - // this really doesn't exist - return FieldReadResult{}, nil - } - - // We couldn't fetch the value from a nested data structure, so treat the - // raw value as an interpolation string. The mraw value is only used - // for the type switch below. - mraw = "${INTERPOLATED}" - } - - result := make(map[string]interface{}) - computed := false - switch m := mraw.(type) { - case string: - // This is a map which has come out of an interpolated variable, so we - // can just get the value directly from config. Values cannot be computed - // currently. - v, _ := r.Config.Get(k) - - // If this isn't a map[string]interface, it must be computed. - mapV, ok := v.(map[string]interface{}) - if !ok { - return FieldReadResult{ - Exists: true, - Computed: true, - }, nil - } - - // Otherwise we can proceed as usual. 
- for i, iv := range mapV { - result[i] = iv - } - case []interface{}: - for i, innerRaw := range m { - for ik := range innerRaw.(map[string]interface{}) { - key := fmt.Sprintf("%s.%d.%s", k, i, ik) - if r.Config.IsComputed(key) { - computed = true - break - } - - v, _ := r.Config.Get(key) - result[ik] = v - } - } - case []map[string]interface{}: - for i, innerRaw := range m { - for ik := range innerRaw { - key := fmt.Sprintf("%s.%d.%s", k, i, ik) - if r.Config.IsComputed(key) { - computed = true - break - } - - v, _ := r.Config.Get(key) - result[ik] = v - } - } - case map[string]interface{}: - for ik := range m { - key := fmt.Sprintf("%s.%s", k, ik) - if r.Config.IsComputed(key) { - computed = true - break - } - - v, _ := r.Config.Get(key) - result[ik] = v - } - default: - panic(fmt.Sprintf("unknown type: %#v", mraw)) - } - - err := mapValuesToPrimitive(result, schema) - if err != nil { - return FieldReadResult{}, nil - } - - var value interface{} - if !computed { - value = result - } - - return FieldReadResult{ - Value: value, - Exists: true, - Computed: computed, - }, nil -} - -func (r *ConfigFieldReader) readPrimitive( - k string, schema *Schema) (FieldReadResult, error) { - raw, ok := r.Config.Get(k) - if !ok { - // Nothing in config, but we might still have a default from the schema - var err error - raw, err = schema.DefaultValue() - if err != nil { - return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err) - } - - if raw == nil { - return FieldReadResult{}, nil - } - } - - var result string - if err := mapstructure.WeakDecode(raw, &result); err != nil { - return FieldReadResult{}, err - } - - computed := r.Config.IsComputed(k) - returnVal, err := stringToPrimitive(result, computed, schema) - if err != nil { - return FieldReadResult{}, err - } - - return FieldReadResult{ - Value: returnVal, - Exists: true, - Computed: computed, - }, nil -} - -func (r *ConfigFieldReader) readSet( - address []string, schema *Schema) (FieldReadResult, error) { - indexMap := make(map[string]int) - // Create the set that will be our result - set := schema.ZeroValue().(*Set) - - raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) - if err != nil { - return FieldReadResult{}, err - } - if !raw.Exists { - return FieldReadResult{Value: set}, nil - } - - // If the list is computed, the set is necessarilly computed - if raw.Computed { - return FieldReadResult{ - Value: set, - Exists: true, - Computed: raw.Computed, - }, nil - } - - // Build up the set from the list elements - for i, v := range raw.Value.([]interface{}) { - // Check if any of the keys in this item are computed - computed := r.hasComputedSubKeys( - fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema) - - code := set.add(v, computed) - indexMap[code] = i - } - - r.indexMaps[strings.Join(address, ".")] = indexMap - - return FieldReadResult{ - Value: set, - Exists: true, - }, nil -} - -// hasComputedSubKeys walks through a schema and returns whether or not the -// given key contains any subkeys that are computed. -func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool { - prefix := key + "." 
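Only *Resource elements are walked in the switch below: a set or list of primitives has no named sub-keys, so computedness for those is detected when the element itself is read.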
-
-	switch t := schema.Elem.(type) {
-	case *Resource:
-		for k, schema := range t.Schema {
-			if r.Config.IsComputed(prefix + k) {
-				return true
-			}
-
-			if r.hasComputedSubKeys(prefix+k, schema) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// nestedConfigFieldReader is a funny little thing that just wraps a
-// ConfigFieldReader to call readField when ReadField is called so that
-// we don't recalculate the set rewrites in the address, which leads to
-// an infinite loop.
-type nestedConfigFieldReader struct {
-	Reader *ConfigFieldReader
-}
-
-func (r *nestedConfigFieldReader) ReadField(
-	address []string) (FieldReadResult, error) {
-	return r.Reader.readField(address, true)
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
deleted file mode 100644
index 644b93e..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package schema
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/hashicorp/terraform/terraform"
-	"github.com/mitchellh/mapstructure"
-)
-
-// DiffFieldReader reads fields out of a diff structure.
-//
-// It also requires access to a Reader that reads fields from the structure
-// that the diff was derived from. This is usually the state. This is required
-// because a diff on its own doesn't have complete data about full objects
-// such as maps.
-//
-// The Source MUST be the data that the diff was derived from. If it isn't,
-// the behavior of this struct is undefined.
-//
-// Reading fields from a DiffFieldReader is identical to reading from
-// Source except the diff will be applied to the end result.
-//
-// The "Exists" field on the result will be set to true if the complete
-// field exists whether it's from the source, diff, or a combination of both.
-// It cannot be determined whether a retrieved value is composed of
-// diff elements.
-type DiffFieldReader struct {
-	Diff   *terraform.InstanceDiff
-	Source FieldReader
-	Schema map[string]*Schema
-
-	// cache for memoizing ReadField calls.
-	cache map[string]cachedFieldReadResult
-}
-
-type cachedFieldReadResult struct {
-	val FieldReadResult
-	err error
-}
-
-func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
-	if r.cache == nil {
-		r.cache = make(map[string]cachedFieldReadResult)
-	}
-
-	// Create the cache key by joining around a value that isn't a valid part
-	// of an address. This assumes that the Source and Schema are not changed
-	// for the life of this DiffFieldReader.
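For example, the address []string{"ports", "0"} (hypothetical) is memoized under the cache key "ports|0"; since "|" is not a valid part of an address element, distinct addresses cannot collide.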
- cacheKey := strings.Join(address, "|") - if cached, ok := r.cache[cacheKey]; ok { - return cached.val, cached.err - } - - schemaList := addrToSchema(address, r.Schema) - if len(schemaList) == 0 { - r.cache[cacheKey] = cachedFieldReadResult{} - return FieldReadResult{}, nil - } - - var res FieldReadResult - var err error - - schema := schemaList[len(schemaList)-1] - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - res, err = r.readPrimitive(address, schema) - case TypeList: - res, err = readListField(r, address, schema) - case TypeMap: - res, err = r.readMap(address, schema) - case TypeSet: - res, err = r.readSet(address, schema) - case typeObject: - res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) - default: - panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) - } - - r.cache[cacheKey] = cachedFieldReadResult{ - val: res, - err: err, - } - return res, err -} - -func (r *DiffFieldReader) readMap( - address []string, schema *Schema) (FieldReadResult, error) { - result := make(map[string]interface{}) - resultSet := false - - // First read the map from the underlying source - source, err := r.Source.ReadField(address) - if err != nil { - return FieldReadResult{}, err - } - if source.Exists { - result = source.Value.(map[string]interface{}) - resultSet = true - } - - // Next, read all the elements we have in our diff, and apply - // the diff to our result. - prefix := strings.Join(address, ".") + "." - for k, v := range r.Diff.Attributes { - if !strings.HasPrefix(k, prefix) { - continue - } - if strings.HasPrefix(k, prefix+"%") { - // Ignore the count field - continue - } - - resultSet = true - - k = k[len(prefix):] - if v.NewRemoved { - delete(result, k) - continue - } - - result[k] = v.New - } - - err = mapValuesToPrimitive(result, schema) - if err != nil { - return FieldReadResult{}, nil - } - - var resultVal interface{} - if resultSet { - resultVal = result - } - - return FieldReadResult{ - Value: resultVal, - Exists: resultSet, - }, nil -} - -func (r *DiffFieldReader) readPrimitive( - address []string, schema *Schema) (FieldReadResult, error) { - result, err := r.Source.ReadField(address) - if err != nil { - return FieldReadResult{}, err - } - - attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] - if !ok { - return result, nil - } - - var resultVal string - if !attrD.NewComputed { - resultVal = attrD.New - if attrD.NewExtra != nil { - result.ValueProcessed = resultVal - if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { - return FieldReadResult{}, err - } - } - } - - result.Computed = attrD.NewComputed - result.Exists = true - result.Value, err = stringToPrimitive(resultVal, false, schema) - if err != nil { - return FieldReadResult{}, err - } - - return result, nil -} - -func (r *DiffFieldReader) readSet( - address []string, schema *Schema) (FieldReadResult, error) { - prefix := strings.Join(address, ".") + "." 
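Set members live in the diff under hash-based indexes, e.g. (hypothetically) "ports.1889087800" or, for a sub-field, "ports.1889087800.from"; the loop below recovers that hash index as the first dot-separated component after the prefix and re-reads each member through ReadField.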
-
-	// Create the set that will be our result
-	set := schema.ZeroValue().(*Set)
-
-	// Go through the map and find all the set items
-	for k, d := range r.Diff.Attributes {
-		if d.NewRemoved {
-			// If the field is removed, we always ignore it
-			continue
-		}
-		if !strings.HasPrefix(k, prefix) {
-			continue
-		}
-		if strings.HasSuffix(k, "#") {
-			// Ignore any count field
-			continue
-		}
-
-		// Split the key, since it might be a sub-object like "idx.field"
-		parts := strings.Split(k[len(prefix):], ".")
-		idx := parts[0]
-
-		raw, err := r.ReadField(append(address, idx))
-		if err != nil {
-			return FieldReadResult{}, err
-		}
-		if !raw.Exists {
-			// This shouldn't happen because we just verified it does exist
-			panic("missing field in set: " + k + "." + idx)
-		}
-
-		set.Add(raw.Value)
-	}
-
-	// Determine if the set "exists". It exists if there are items or if
-	// the diff explicitly wanted it empty.
-	exists := set.Len() > 0
-	if !exists {
-		// We could check if the diff value is "0" here but I think the
-		// existence of "#" on its own is enough to show it existed. This
-		// protects us in the future from the zero value changing from
-		// "0" to "" breaking us (if that were to happen).
-		if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
-			exists = true
-		}
-	}
-
-	if !exists {
-		result, err := r.Source.ReadField(address)
-		if err != nil {
-			return FieldReadResult{}, err
-		}
-		if result.Exists {
-			return result, nil
-		}
-	}
-
-	return FieldReadResult{
-		Value:  set,
-		Exists: exists,
-	}, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
deleted file mode 100644
index 9533981..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package schema
-
-import (
-	"fmt"
-	"strings"
-)
-
-// MapFieldReader reads fields out of an untyped map[string]string to
-// the best of its ability.
-type MapFieldReader struct {
-	Map    MapReader
-	Schema map[string]*Schema
-}
-
-func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
-	k := strings.Join(address, ".")
-	schemaList := addrToSchema(address, r.Schema)
-	if len(schemaList) == 0 {
-		return FieldReadResult{}, nil
-	}
-
-	schema := schemaList[len(schemaList)-1]
-	switch schema.Type {
-	case TypeBool, TypeInt, TypeFloat, TypeString:
-		return r.readPrimitive(address, schema)
-	case TypeList:
-		return readListField(r, address, schema)
-	case TypeMap:
-		return r.readMap(k, schema)
-	case TypeSet:
-		return r.readSet(address, schema)
-	case typeObject:
-		return readObjectField(r, address, schema.Elem.(map[string]*Schema))
-	default:
-		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
-	}
-}
-
-func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
-	result := make(map[string]interface{})
-	resultSet := false
-
-	// If the name of the map field is directly in the map with an
-	// empty string, it means that the map is being deleted, so mark
-	// that it is set.
-	if v, ok := r.Map.Access(k); ok && v == "" {
-		resultSet = true
-	}
-
-	prefix := k + "."
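The flattened layout scanned here looks like (hypothetical keys) "tags.%" = "2", "tags.Name" = "web", "tags.Env" = "prod"; the "%" entry carries the element count and is skipped when the map is rebuilt.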
- r.Map.Range(func(k, v string) bool { - if strings.HasPrefix(k, prefix) { - resultSet = true - - key := k[len(prefix):] - if key != "%" && key != "#" { - result[key] = v - } - } - - return true - }) - - err := mapValuesToPrimitive(result, schema) - if err != nil { - return FieldReadResult{}, nil - } - - var resultVal interface{} - if resultSet { - resultVal = result - } - - return FieldReadResult{ - Value: resultVal, - Exists: resultSet, - }, nil -} - -func (r *MapFieldReader) readPrimitive( - address []string, schema *Schema) (FieldReadResult, error) { - k := strings.Join(address, ".") - result, ok := r.Map.Access(k) - if !ok { - return FieldReadResult{}, nil - } - - returnVal, err := stringToPrimitive(result, false, schema) - if err != nil { - return FieldReadResult{}, err - } - - return FieldReadResult{ - Value: returnVal, - Exists: true, - }, nil -} - -func (r *MapFieldReader) readSet( - address []string, schema *Schema) (FieldReadResult, error) { - // Get the number of elements in the list - countRaw, err := r.readPrimitive( - append(address, "#"), &Schema{Type: TypeInt}) - if err != nil { - return FieldReadResult{}, err - } - if !countRaw.Exists { - // No count, means we have no list - countRaw.Value = 0 - } - - // Create the set that will be our result - set := schema.ZeroValue().(*Set) - - // If we have an empty list, then return an empty list - if countRaw.Computed || countRaw.Value.(int) == 0 { - return FieldReadResult{ - Value: set, - Exists: countRaw.Exists, - Computed: countRaw.Computed, - }, nil - } - - // Go through the map and find all the set items - prefix := strings.Join(address, ".") + "." - countExpected := countRaw.Value.(int) - countActual := make(map[string]struct{}) - completed := r.Map.Range(func(k, _ string) bool { - if !strings.HasPrefix(k, prefix) { - return true - } - if strings.HasPrefix(k, prefix+"#") { - // Ignore the count field - return true - } - - // Split the key, since it might be a sub-object like "idx.field" - parts := strings.Split(k[len(prefix):], ".") - idx := parts[0] - - var raw FieldReadResult - raw, err = r.ReadField(append(address, idx)) - if err != nil { - return false - } - if !raw.Exists { - // This shouldn't happen because we just verified it does exist - panic("missing field in set: " + k + "." + idx) - } - - set.Add(raw.Value) - - // Due to the way multimap readers work, if we've seen the number - // of fields we expect, then exit so that we don't read later values. - // For example: the "set" map might have "ports.#", "ports.0", and - // "ports.1", but the "state" map might have those plus "ports.2". - // We don't want "ports.2" - countActual[idx] = struct{}{} - if len(countActual) >= countExpected { - return false - } - - return true - }) - if !completed && err != nil { - return FieldReadResult{}, err - } - - return FieldReadResult{ - Value: set, - Exists: true, - }, nil -} - -// MapReader is an interface that is given to MapFieldReader for accessing -// a "map". This can be used to have alternate implementations. For a basic -// map[string]string, use BasicMapReader. -type MapReader interface { - Access(string) (string, bool) - Range(func(string, string) bool) bool -} - -// BasicMapReader implements MapReader for a single map. 
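A minimal usage sketch (hypothetical field name and data), assuming a schema with a single string attribute:

r := &MapFieldReader{
	Map:    BasicMapReader(map[string]string{"name": "web"}),
	Schema: map[string]*Schema{"name": {Type: TypeString}},
}
res, _ := r.ReadField([]string{"name"}) // res.Value == "web", res.Exists == true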
-type BasicMapReader map[string]string
-
-func (r BasicMapReader) Access(k string) (string, bool) {
-	v, ok := r[k]
-	return v, ok
-}
-
-func (r BasicMapReader) Range(f func(string, string) bool) bool {
-	for k, v := range r {
-		if cont := f(k, v); !cont {
-			return false
-		}
-	}
-
-	return true
-}
-
-// MultiMapReader reads over multiple maps, preferring keys that are
-// found earlier (lower number index) vs. later (higher number index)
-type MultiMapReader []map[string]string
-
-func (r MultiMapReader) Access(k string) (string, bool) {
-	for _, m := range r {
-		if v, ok := m[k]; ok {
-			return v, ok
-		}
-	}
-
-	return "", false
-}
-
-func (r MultiMapReader) Range(f func(string, string) bool) bool {
-	done := make(map[string]struct{})
-	for _, m := range r {
-		for k, v := range m {
-			if _, ok := done[k]; ok {
-				continue
-			}
-
-			if cont := f(k, v); !cont {
-				return false
-			}
-
-			done[k] = struct{}{}
-		}
-	}
-
-	return true
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
deleted file mode 100644
index 89ad3a8..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package schema
-
-import (
-	"fmt"
-)
-
-// MultiLevelFieldReader reads from other field readers,
-// merging their results along the way in a specific order. You can specify
-// "levels" and name them in order to read only an exact level or up to
-// a specific level.
-//
-// This is useful for saying things such as "read the field from the state
-// and config and merge them" or "read the latest value of the field".
-type MultiLevelFieldReader struct {
-	Readers map[string]FieldReader
-	Levels  []string
-}
-
-func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
-	return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
-}
-
-func (r *MultiLevelFieldReader) ReadFieldExact(
-	address []string, level string) (FieldReadResult, error) {
-	reader, ok := r.Readers[level]
-	if !ok {
-		return FieldReadResult{}, fmt.Errorf(
-			"Unknown reader level: %s", level)
-	}
-
-	result, err := reader.ReadField(address)
-	if err != nil {
-		return FieldReadResult{}, fmt.Errorf(
-			"Error reading level %s: %s", level, err)
-	}
-
-	return result, nil
-}
-
-func (r *MultiLevelFieldReader) ReadFieldMerge(
-	address []string, level string) (FieldReadResult, error) {
-	var result FieldReadResult
-	for _, l := range r.Levels {
-		if r, ok := r.Readers[l]; ok {
-			out, err := r.ReadField(address)
-			if err != nil {
-				return FieldReadResult{}, fmt.Errorf(
-					"Error reading level %s: %s", l, err)
-			}
-
-			// TODO: computed
-			if out.Exists {
-				result = out
-			}
-		}
-
-		if l == level {
-			break
-		}
-	}
-
-	return result, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
deleted file mode 100644
index 9abc41b..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package schema
-
-// FieldWriters are responsible for writing fields by address into
-// a proper typed representation. ResourceData uses this to write new data
-// into existing sources.
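A complementary writer sketch (same hypothetical style), showing the flattened form the MapFieldWriter defined below produces for a list:

w := &MapFieldWriter{Schema: map[string]*Schema{
	"ports": {Type: TypeList, Elem: &Schema{Type: TypeInt}},
}}
_ = w.WriteField([]string{"ports"}, []interface{}{80, 443})
// w.Map() now contains "ports.#" = "2", "ports.0" = "80", "ports.1" = "443".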
-type FieldWriter interface { - WriteField([]string, interface{}) error -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go deleted file mode 100644 index 689ed8d..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ /dev/null @@ -1,319 +0,0 @@ -package schema - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "github.com/mitchellh/mapstructure" -) - -// MapFieldWriter writes data into a single map[string]string structure. -type MapFieldWriter struct { - Schema map[string]*Schema - - lock sync.Mutex - result map[string]string -} - -// Map returns the underlying map that is being written to. -func (w *MapFieldWriter) Map() map[string]string { - w.lock.Lock() - defer w.lock.Unlock() - if w.result == nil { - w.result = make(map[string]string) - } - - return w.result -} - -func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { - w.lock.Lock() - defer w.lock.Unlock() - if w.result == nil { - w.result = make(map[string]string) - } - - w.result[addr] = value -} - -func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { - w.lock.Lock() - defer w.lock.Unlock() - if w.result == nil { - w.result = make(map[string]string) - } - - schemaList := addrToSchema(addr, w.Schema) - if len(schemaList) == 0 { - return fmt.Errorf("Invalid address to set: %#v", addr) - } - - // If we're setting anything other than a list root or set root, - // then disallow it. - for _, schema := range schemaList[:len(schemaList)-1] { - if schema.Type == TypeList { - return fmt.Errorf( - "%s: can only set full list", - strings.Join(addr, ".")) - } - - if schema.Type == TypeMap { - return fmt.Errorf( - "%s: can only set full map", - strings.Join(addr, ".")) - } - - if schema.Type == TypeSet { - return fmt.Errorf( - "%s: can only set full set", - strings.Join(addr, ".")) - } - } - - return w.set(addr, value) -} - -func (w *MapFieldWriter) set(addr []string, value interface{}) error { - schemaList := addrToSchema(addr, w.Schema) - if len(schemaList) == 0 { - return fmt.Errorf("Invalid address to set: %#v", addr) - } - - schema := schemaList[len(schemaList)-1] - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - return w.setPrimitive(addr, value, schema) - case TypeList: - return w.setList(addr, value, schema) - case TypeMap: - return w.setMap(addr, value, schema) - case TypeSet: - return w.setSet(addr, value, schema) - case typeObject: - return w.setObject(addr, value, schema) - default: - panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) - } -} - -func (w *MapFieldWriter) setList( - addr []string, - v interface{}, - schema *Schema) error { - k := strings.Join(addr, ".") - setElement := func(idx string, value interface{}) error { - addrCopy := make([]string, len(addr), len(addr)+1) - copy(addrCopy, addr) - return w.set(append(addrCopy, idx), value) - } - - var vs []interface{} - if err := mapstructure.Decode(v, &vs); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - // Set the entire list. 
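Each element is written under its stringified index; if any element fails to encode, the loop below unwinds by writing nil over every index it touched before returning the error.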
-	var err error
-	for i, elem := range vs {
-		is := strconv.FormatInt(int64(i), 10)
-		err = setElement(is, elem)
-		if err != nil {
-			break
-		}
-	}
-	if err != nil {
-		for i, _ := range vs {
-			is := strconv.FormatInt(int64(i), 10)
-			setElement(is, nil)
-		}
-
-		return err
-	}
-
-	w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
-	return nil
-}
-
-func (w *MapFieldWriter) setMap(
-	addr []string,
-	value interface{},
-	schema *Schema) error {
-	k := strings.Join(addr, ".")
-	v := reflect.ValueOf(value)
-	vs := make(map[string]interface{})
-
-	if value == nil {
-		// The empty string here means the map is removed.
-		w.result[k] = ""
-		return nil
-	}
-
-	if v.Kind() != reflect.Map {
-		return fmt.Errorf("%s: must be a map", k)
-	}
-	if v.Type().Key().Kind() != reflect.String {
-		return fmt.Errorf("%s: keys must be strings", k)
-	}
-	for _, mk := range v.MapKeys() {
-		mv := v.MapIndex(mk)
-		vs[mk.String()] = mv.Interface()
-	}
-
-	// Remove the pure key since we're setting the full map value
-	delete(w.result, k)
-
-	// Set each subkey
-	addrCopy := make([]string, len(addr), len(addr)+1)
-	copy(addrCopy, addr)
-	for subKey, v := range vs {
-		if err := w.set(append(addrCopy, subKey), v); err != nil {
-			return err
-		}
-	}
-
-	// Set the count
-	w.result[k+".%"] = strconv.Itoa(len(vs))
-
-	return nil
-}
-
-func (w *MapFieldWriter) setObject(
-	addr []string,
-	value interface{},
-	schema *Schema) error {
-	// Set the entire object. First decode into a proper structure
-	var v map[string]interface{}
-	if err := mapstructure.Decode(value, &v); err != nil {
-		return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
-	}
-
-	// Make space for additional elements in the address
-	addrCopy := make([]string, len(addr), len(addr)+1)
-	copy(addrCopy, addr)
-
-	// Set each element in turn
-	var err error
-	for k1, v1 := range v {
-		if err = w.set(append(addrCopy, k1), v1); err != nil {
-			break
-		}
-	}
-	if err != nil {
-		for k1, _ := range v {
-			w.set(append(addrCopy, k1), nil)
-		}
-	}
-
-	return err
-}
-
-func (w *MapFieldWriter) setPrimitive(
-	addr []string,
-	v interface{},
-	schema *Schema) error {
-	k := strings.Join(addr, ".")
-
-	if v == nil {
-		// The empty string here means the value is removed.
-		w.result[k] = ""
-		return nil
-	}
-
-	var set string
-	switch schema.Type {
-	case TypeBool:
-		var b bool
-		if err := mapstructure.Decode(v, &b); err != nil {
-			return fmt.Errorf("%s: %s", k, err)
-		}
-
-		set = strconv.FormatBool(b)
-	case TypeString:
-		if err := mapstructure.Decode(v, &set); err != nil {
-			return fmt.Errorf("%s: %s", k, err)
-		}
-	case TypeInt:
-		var n int
-		if err := mapstructure.Decode(v, &n); err != nil {
-			return fmt.Errorf("%s: %s", k, err)
-		}
-		set = strconv.FormatInt(int64(n), 10)
-	case TypeFloat:
-		var n float64
-		if err := mapstructure.Decode(v, &n); err != nil {
-			return fmt.Errorf("%s: %s", k, err)
-		}
-		set = strconv.FormatFloat(float64(n), 'G', -1, 64)
-	default:
-		return fmt.Errorf("Unknown type: %#v", schema.Type)
-	}
-
-	w.result[k] = set
-	return nil
-}
-
-func (w *MapFieldWriter) setSet(
-	addr []string,
-	value interface{},
-	schema *Schema) error {
-	addrCopy := make([]string, len(addr), len(addr)+1)
-	copy(addrCopy, addr)
-	k := strings.Join(addr, ".")
-
-	if value == nil {
-		w.result[k+".#"] = "0"
-		return nil
-	}
-
-	// If it is a slice, then we have to turn it into a *Set so that
-	// we get the proper order back based on the hash code.
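The list-write-then-read round trip below normalizes every element to a generic interface{} value before hashing, so the set's hash function always sees consistent types; the final storage keys are the hash codes themselves, e.g. (hypothetically) "<field>.14" for an element hashing to 14.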
- if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { - // Build a temp *ResourceData to use for the conversion - tempSchema := *schema - tempSchema.Type = TypeList - tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} - tempW := &MapFieldWriter{Schema: tempSchemaMap} - - // Set the entire list, this lets us get sane values out of it - if err := tempW.WriteField(addr, value); err != nil { - return err - } - - // Build the set by going over the list items in order and - // hashing them into the set. The reason we go over the list and - // not the `value` directly is because this forces all types - // to become []interface{} (generic) instead of []string, which - // most hash functions are expecting. - s := schema.ZeroValue().(*Set) - tempR := &MapFieldReader{ - Map: BasicMapReader(tempW.Map()), - Schema: tempSchemaMap, - } - for i := 0; i < v.Len(); i++ { - is := strconv.FormatInt(int64(i), 10) - result, err := tempR.ReadField(append(addrCopy, is)) - if err != nil { - return err - } - if !result.Exists { - panic("set item just set doesn't exist") - } - - s.Add(result.Value) - } - - value = s - } - - for code, elem := range value.(*Set).m { - if err := w.set(append(addrCopy, code), elem); err != nil { - return err - } - } - - w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go deleted file mode 100644 index 3a97629..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. - -package schema - -import "fmt" - -const ( - _getSource_name_0 = "getSourceStategetSourceConfig" - _getSource_name_1 = "getSourceDiff" - _getSource_name_2 = "getSourceSet" - _getSource_name_3 = "getSourceLevelMaskgetSourceExact" -) - -var ( - _getSource_index_0 = [...]uint8{0, 14, 29} - _getSource_index_1 = [...]uint8{0, 13} - _getSource_index_2 = [...]uint8{0, 12} - _getSource_index_3 = [...]uint8{0, 18, 32} -) - -func (i getSource) String() string { - switch { - case 1 <= i && i <= 2: - i -= 1 - return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] - case i == 4: - return _getSource_name_1 - case i == 8: - return _getSource_name_2 - case 15 <= i && i <= 16: - i -= 15 - return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] - default: - return fmt.Sprintf("getSource(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go deleted file mode 100644 index fb28b41..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ /dev/null @@ -1,417 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -// Provider represents a resource provider in Terraform, and properly -// implements all of the ResourceProvider API. -// -// By defining a schema for the configuration of the provider, the -// map of supporting resources, and a configuration function, the schema -// framework takes over and handles all the provider operations for you. -// -// After defining the provider structure, it is unlikely that you'll require any -// of the methods on Provider itself. 
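A minimal sketch of what a provider definition built on this struct looks like (all identifiers hypothetical):

p := &Provider{
	Schema:       map[string]*Schema{"endpoint": {Type: TypeString, Required: true}},
	ResourcesMap: map[string]*Resource{"example_thing": resourceExampleThing()},
	ConfigureFunc: func(d *ResourceData) (interface{}, error) {
		// The returned client becomes the meta value passed to every CRUD function.
		return newExampleClient(d.Get("endpoint").(string)), nil
	},
}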
-type Provider struct {
-	// Schema is the schema for the configuration of this provider. If this
-	// provider has no configuration, this can be omitted.
-	//
-	// The keys of this map are the configuration keys, and the value is
-	// the schema describing the value of the configuration.
-	Schema map[string]*Schema
-
-	// ResourcesMap is the list of available resources that this provider
-	// can manage, along with their Resource structure defining their
-	// own schemas and CRUD operations.
-	//
-	// Provider automatically handles routing operations such as Apply,
-	// Diff, etc. to the proper resource.
-	ResourcesMap map[string]*Resource
-
-	// DataSourcesMap is the collection of available data sources that
-	// this provider implements, with a Resource instance defining
-	// the schema and Read operation of each.
-	//
-	// Resource instances for data sources must have a Read function
-	// and must *not* implement Create, Update or Delete.
-	DataSourcesMap map[string]*Resource
-
-	// ConfigureFunc is a function for configuring the provider. If the
-	// provider doesn't need to be configured, this can be omitted.
-	//
-	// See the ConfigureFunc documentation for more information.
-	ConfigureFunc ConfigureFunc
-
-	// MetaReset is called by TestReset to reset any state stored in the meta
-	// interface. This is especially important if the StopContext is stored by
-	// the provider.
-	MetaReset func() error
-
-	meta interface{}
-
-	// a mutex is required because TestReset can directly replace the stopCtx
-	stopMu        sync.Mutex
-	stopCtx       context.Context
-	stopCtxCancel context.CancelFunc
-	stopOnce      sync.Once
-}
-
-// ConfigureFunc is the function used to configure a Provider.
-//
-// The interface{} value returned by this function is stored and passed into
-// the subsequent resources as the meta parameter. This return value is
-// usually used to pass along a configured API client, a configuration
-// structure, etc.
-type ConfigureFunc func(*ResourceData) (interface{}, error)
-
-// InternalValidate should be called to validate the structure
-// of the provider.
-//
-// This should be called in a unit test for any provider to verify
-// before release that a provider is properly configured for use with
-// this library.
-func (p *Provider) InternalValidate() error {
-	if p == nil {
-		return errors.New("provider is nil")
-	}
-
-	var validationErrors error
-	sm := schemaMap(p.Schema)
-	if err := sm.InternalValidate(sm); err != nil {
-		validationErrors = multierror.Append(validationErrors, err)
-	}
-
-	// Provider-specific checks
-	for k, _ := range sm {
-		if isReservedProviderFieldName(k) {
-			return fmt.Errorf("%s is a reserved field name for a provider", k)
-		}
-	}
-
-	for k, r := range p.ResourcesMap {
-		if err := r.InternalValidate(nil, true); err != nil {
-			validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
-		}
-	}
-
-	for k, r := range p.DataSourcesMap {
-		if err := r.InternalValidate(nil, false); err != nil {
-			validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
-		}
-	}
-
-	return validationErrors
-}
-
-func isReservedProviderFieldName(name string) bool {
-	for _, reservedName := range config.ReservedProviderFields {
-		if name == reservedName {
-			return true
-		}
-	}
-	return false
-}
-
-// Meta returns the metadata associated with this provider that was
-// returned by the Configure call. It will be nil until Configure is called.
-func (p *Provider) Meta() interface{} { - return p.meta -} - -// SetMeta can be used to forcefully set the Meta object of the provider. -// Note that if Configure is called the return value will override anything -// set here. -func (p *Provider) SetMeta(v interface{}) { - p.meta = v -} - -// Stopped reports whether the provider has been stopped or not. -func (p *Provider) Stopped() bool { - ctx := p.StopContext() - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// StopCh returns a channel that is closed once the provider is stopped. -func (p *Provider) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - return p.stopCtx -} - -func (p *Provider) stopInit() { - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvider interface. -func (p *Provider) Stop() error { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtxCancel() - return nil -} - -// TestReset resets any state stored in the Provider, and will call TestReset -// on Meta if it implements the TestProvider interface. -// This may be used to reset the schema.Provider at the start of a test, and is -// automatically called by resource.Test. -func (p *Provider) TestReset() error { - p.stopInit() - if p.MetaReset != nil { - return p.MetaReset() - } - return nil -} - -// Input implementation of terraform.ResourceProvider interface. -func (p *Provider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return schemaMap(p.Schema).Input(input, c) -} - -// Validate implementation of terraform.ResourceProvider interface. -func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provider failed! This is always a bug\n"+ - "with the provider itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - return schemaMap(p.Schema).Validate(c) -} - -// ValidateResource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.ResourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support resource: %s", t)} - } - - return r.Validate(c) -} - -// Configure implementation of terraform.ResourceProvider interface. -func (p *Provider) Configure(c *terraform.ResourceConfig) error { - // No configuration - if p.ConfigureFunc == nil { - return nil - } - - sm := schemaMap(p.Schema) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) - if err != nil { - return err - } - - data, err := sm.Data(nil, diff) - if err != nil { - return err - } - - meta, err := p.ConfigureFunc(data) - if err != nil { - return err - } - - p.meta = meta - return nil -} - -// Apply implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Apply(s, d, p.meta) -} - -// Diff implementation of terraform.ResourceProvider interface. -func (p *Provider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Diff(s, c) -} - -// Refresh implementation of terraform.ResourceProvider interface. -func (p *Provider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Refresh(s, p.meta) -} - -// Resources implementation of terraform.ResourceProvider interface. -func (p *Provider) Resources() []terraform.ResourceType { - keys := make([]string, 0, len(p.ResourcesMap)) - for k, _ := range p.ResourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.ResourceType, 0, len(keys)) - for _, k := range keys { - resource := p.ResourcesMap[k] - - // This isn't really possible (it'd fail InternalValidate), but - // we do it anyways to avoid a panic. - if resource == nil { - resource = &Resource{} - } - - result = append(result, terraform.ResourceType{ - Name: k, - Importable: resource.Importer != nil, - }) - } - - return result -} - -func (p *Provider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - // Find the resource - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - // If it doesn't support import, error - if r.Importer == nil { - return nil, fmt.Errorf("resource %s doesn't support import", info.Type) - } - - // Create the data - data := r.Data(nil) - data.SetId(id) - data.SetType(info.Type) - - // Call the import function - results := []*ResourceData{data} - if r.Importer.State != nil { - var err error - results, err = r.Importer.State(data, p.meta) - if err != nil { - return nil, err - } - } - - // Convert the results to InstanceState values and return it - states := make([]*terraform.InstanceState, len(results)) - for i, r := range results { - states[i] = r.State() - } - - // Verify that all are non-nil. If there are any nil the error - // isn't obvious so we circumvent that with a friendlier error. - for _, s := range states { - if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") - } - } - - return states, nil -} - -// ValidateDataSource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.DataSourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support data source: %s", t)} - } - - return r.Validate(c) -} - -// ReadDataDiff implementation of terraform.ResourceProvider interface. 
-func (p *Provider) ReadDataDiff(
-	info *terraform.InstanceInfo,
-	c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
-
-	r, ok := p.DataSourcesMap[info.Type]
-	if !ok {
-		return nil, fmt.Errorf("unknown data source: %s", info.Type)
-	}
-
-	return r.Diff(nil, c)
-}
-
-// ReadDataApply implementation of terraform.ResourceProvider interface.
-func (p *Provider) ReadDataApply(
-	info *terraform.InstanceInfo,
-	d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
-
-	r, ok := p.DataSourcesMap[info.Type]
-	if !ok {
-		return nil, fmt.Errorf("unknown data source: %s", info.Type)
-	}
-
-	return r.ReadDataApply(d, p.meta)
-}
-
-// DataSources implementation of terraform.ResourceProvider interface.
-func (p *Provider) DataSources() []terraform.DataSource {
-	keys := make([]string, 0, len(p.DataSourcesMap))
-	for k, _ := range p.DataSourcesMap {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-
-	result := make([]terraform.DataSource, 0, len(keys))
-	for _, k := range keys {
-		result = append(result, terraform.DataSource{
-			Name: k,
-		})
-	}
-
-	return result
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
deleted file mode 100644
index 476192e..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package schema
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/terraform"
-)
-
-// Provisioner represents a resource provisioner in Terraform and properly
-// implements all of the ResourceProvisioner API.
-//
-// This higher level structure makes it much easier to implement a new or
-// custom provisioner for Terraform.
-//
-// The function callbacks for this structure are all passed a context object.
-// This context object has a number of pre-defined values that can be accessed
-// via the global functions defined in context.go.
-type Provisioner struct {
-	// ConnSchema is the schema for the connection settings for this
-	// provisioner.
-	//
-	// The keys of this map are the configuration keys, and the value is
-	// the schema describing the value of the configuration.
-	//
-	// NOTE: The value of connection keys can only be strings for now.
-	ConnSchema map[string]*Schema
-
-	// Schema is the schema for the usage of this provisioner.
-	//
-	// The keys of this map are the configuration keys, and the value is
-	// the schema describing the value of the configuration.
-	Schema map[string]*Schema
-
-	// ApplyFunc is the function for executing the provisioner. This is required.
-	// It is given a context. See the Provisioner struct docs for more
-	// information.
-	ApplyFunc func(ctx context.Context) error
-
-	// ValidateFunc is a function for extended validation. This is optional
-	// and should be used when individual field validation is not enough.
-	ValidateFunc func(*terraform.ResourceConfig) ([]string, []error)
-
-	stopCtx       context.Context
-	stopCtxCancel context.CancelFunc
-	stopOnce      sync.Once
-}
-
-// Keys that can be used to access data in the context parameters for
-// Provisioners.
-var (
-	connDataInvalid = contextKey("data invalid")
-
-	// This returns a *ResourceData for the connection information.
-	// Guaranteed to never be nil.
-	ProvConnDataKey = contextKey("provider conn data")
-
-	// This returns a *ResourceData for the config information.
-	// Guaranteed to never be nil.
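Inside an ApplyFunc, these keys are read back off the context; a sketch (hypothetical provisioner logic, assuming a string "command" argument in Schema):

func applyExample(ctx context.Context) error {
	data := ctx.Value(ProvConfigDataKey).(*ResourceData)
	output := ctx.Value(ProvOutputKey).(terraform.UIOutput)
	output.Output(fmt.Sprintf("running %q", data.Get("command").(string)))
	return nil
}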
- ProvConfigDataKey = contextKey("provider config data") - - // This returns a terraform.UIOutput. Guaranteed to never be nil. - ProvOutputKey = contextKey("provider output") - - // This returns the raw InstanceState passed to Apply. Guaranteed to - // be set, but may be nil. - ProvRawStateKey = contextKey("provider raw state") -) - -// InternalValidate should be called to validate the structure -// of the provisioner. -// -// This should be called in a unit test to verify before release that this -// structure is properly configured for use. -func (p *Provisioner) InternalValidate() error { - if p == nil { - return errors.New("provisioner is nil") - } - - var validationErrors error - { - sm := schemaMap(p.ConnSchema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - { - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - if p.ApplyFunc == nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf( - "ApplyFunc must not be nil")) - } - - return validationErrors -} - -// StopContext returns a context that checks whether a provisioner is stopped. -func (p *Provisioner) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - return p.stopCtx -} - -func (p *Provisioner) stopInit() { - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Stop() error { - p.stopOnce.Do(p.stopInit) - p.stopCtxCancel() - return nil -} - -// Apply implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Apply( - o terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - var connData, configData *ResourceData - - { - // We first need to turn the connection information into a - // terraform.ResourceConfig so that we can use that type to more - // easily build a ResourceData structure. We do this by simply treating - // the conn info as configuration input. - raw := make(map[string]interface{}) - if s != nil { - for k, v := range s.Ephemeral.ConnInfo { - raw[k] = v - } - } - - c, err := config.NewRawConfig(raw) - if err != nil { - return err - } - - sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) - if err != nil { - return err - } - connData, err = sm.Data(nil, diff) - if err != nil { - return err - } - } - - { - // Build the configuration data. Doing this requires making a "diff" - // even though that's never used. We use that just to get the correct types. - configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c) - if err != nil { - return err - } - configData, err = configMap.Data(nil, diff) - if err != nil { - return err - } - } - - // Build the context and call the function - ctx := p.StopContext() - ctx = context.WithValue(ctx, ProvConnDataKey, connData) - ctx = context.WithValue(ctx, ProvConfigDataKey, configData) - ctx = context.WithValue(ctx, ProvOutputKey, o) - ctx = context.WithValue(ctx, ProvRawStateKey, s) - return p.ApplyFunc(ctx) -} - -// Validate implements the terraform.ResourceProvisioner interface. -func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provisioner failed! 
This is always a bug\n"+ - "with the provisioner itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - if p.Schema != nil { - w, e := schemaMap(p.Schema).Validate(c) - ws = append(ws, w...) - es = append(es, e...) - } - - if p.ValidateFunc != nil { - w, e := p.ValidateFunc(c) - ws = append(ws, w...) - es = append(es, e...) - } - - return ws, es -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go deleted file mode 100644 index ddba109..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ /dev/null @@ -1,501 +0,0 @@ -package schema - -import ( - "errors" - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). -// -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. -// -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. -type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. - // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. - Schema map[string]*Schema - - // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. - // - // When unset, SchemaVersion defaults to 0, so provider authors can start - // their Versioning at any integer >= 1 - SchemaVersion int - - // MigrateState is responsible for updating an InstanceState with an old - // version to the format expected by the current version of the Schema. - // - // It is called during Refresh if the State's stored SchemaVersion is less - // than the current SchemaVersion of the Resource. - // - // The function is yielded the state's stored SchemaVersion and a pointer to - // the InstanceState that needs updating, as well as the configured - // provider's configured meta interface{}, in case the migration process - // needs to make any remote API calls. - MigrateState StateMigrateFunc - - // The functions below are the CRUD operations for this resource. - // - // The only optional operation is Update. If Update is not implemented, - // then updates will not be supported for this resource. - // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. - // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. 
If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. - // - // If any errors occur during any of the operations, an error should be - // returned. If a resource was partially updated, be careful to enable - // partial state mode for ResourceData and use it accordingly. - // - // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff - // accordingly. If this function isn't set, it will not be called. It - // is highly recommended to set it. The *ResourceData passed to Exists - // should _not_ be modified. - Create CreateFunc - Read ReadFunc - Update UpdateFunc - Delete DeleteFunc - Exists ExistsFunc - - // Importer is the ResourceImporter implementation for this resource. - // If this is nil, then this resource does not support importing. If - // this is non-nil, then it supports importing and ResourceImporter - // must be validated. The validity of ResourceImporter is verified - // by InternalValidate on Resource. - Importer *ResourceImporter - - // If non-empty, this string is emitted as a warning during Validate. - // This is a private interface for now, for use by DataSourceResourceShim, - // and not for general use. (But maybe later...) - deprecationMessage string - - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to its size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. - Timeouts *ResourceTimeout -} - -// See Resource documentation. -type CreateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ReadFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type UpdateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type DeleteFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ExistsFunc func(*ResourceData, interface{}) (bool, error) - -// See Resource documentation. -type StateMigrateFunc func( - int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) - -// Apply creates, updates, and/or deletes a resource. -func (r *Resource) Apply( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - data, err := schemaMap(r.Schema).Data(s, d) - if err != nil { - return s, err - } - - // Instance Diff should have the timeout info, need to copy it over to the - // ResourceData meta - rt := ResourceTimeout{} - if _, ok := d.Meta[TimeoutKey]; ok { - if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } else if s != nil { - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") - } - data.timeouts = &rt - - if s == nil { - // The Terraform API dictates that this should never happen, but - // it doesn't hurt to be safe in this case.
- s = new(terraform.InstanceState) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource since it is created - if err := r.Delete(data, meta); err != nil { - return r.recordCurrentSchemaVersion(data.State()), err - } - - // Make sure the ID is gone. - data.SetId("") - } - - // If we're only destroying, and not creating, then return - // now since we're done! - if !d.RequiresNew() { - return nil, nil - } - - // Reset the data to be stateless since we just destroyed - data, err = schemaMap(r.Schema).Data(nil, d) - // data was reset, need to re-apply the parsed timeouts - data.timeouts = &rt - if err != nil { - return nil, err - } - } - - err = nil - if data.Id() == "" { - // We're creating, it is a new resource. - data.MarkNewResource() - err = r.Create(data, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf("doesn't support update") - } - - err = r.Update(data, meta) - } - - return r.recordCurrentSchemaVersion(data.State()), err -} - -// Diff returns a diff of this resource and is API compatible with the -// ResourceProvider interface. -func (r *Resource) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - - t := &ResourceTimeout{} - err := t.ConfigDecode(r, c) - - if err != nil { - return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) - } - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c) - if err != nil { - return instanceDiff, err - } - - if instanceDiff != nil { - if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) - } - } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") - } - - return instanceDiff, err -} - -// Validate validates the resource configuration against the schema. -func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := schemaMap(r.Schema).Validate(c) - - if r.deprecationMessage != "" { - warns = append(warns, r.deprecationMessage) - } - - return warns, errs -} - -// ReadDataApply loads the data for a data source, given a diff that -// describes the configuration arguments and desired computed attributes. -func (r *Resource) ReadDataApply( - d *terraform.InstanceDiff, - meta interface{}, -) (*terraform.InstanceState, error) { - - // Data sources are always built completely from scratch - // on each read, so the source state is always nil. - data, err := schemaMap(r.Schema).Data(nil, d) - if err != nil { - return nil, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - // Data sources can set an ID if they want, but they aren't - // required to; we'll provide a placeholder if they don't, - // to preserve the invariant that all resources have non-empty - // ids. - state.ID = "-" - } - - return r.recordCurrentSchemaVersion(state), err -} - -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. 
- data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - if needsMigration && r.MigrateState != nil { - s, err := r.MigrateState(stateSchemaVersion, s, meta) - if err != nil { - return s, err - } - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -// InternalValidate should be called to validate the structure -// of the resource. -// -// This should be called in a unit test for any resource to verify -// before release that a resource is properly configured for use with -// this library. -// -// Provider.InternalValidate() will automatically call this for all of -// the resources it manages, so you don't need to call this manually if it -// is part of a Provider. -func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { - if r == nil { - return errors.New("resource is nil") - } - - if !writable { - if r.Create != nil || r.Update != nil || r.Delete != nil { - return fmt.Errorf("must not implement Create, Update or Delete") - } - } - - tsm := topSchemaMap - - if r.isTopLevel() && writable { - // All non-Computed attributes must be ForceNew if Update is not defined - if r.Update == nil { - nonForceNewAttrs := make([]string, 0) - for k, v := range r.Schema { - if !v.ForceNew && !v.Computed { - nonForceNewAttrs = append(nonForceNewAttrs, k) - } - } - if len(nonForceNewAttrs) > 0 { - return fmt.Errorf( - "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) - } - } else { - nonUpdateableAttrs := make([]string, 0) - for k, v := range r.Schema { - if v.ForceNew || v.Computed && !v.Optional { - nonUpdateableAttrs = append(nonUpdateableAttrs, k) - } - } - updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) - if updateableAttrs == 0 { - return fmt.Errorf( - "All fields are ForceNew or Computed w/out Optional, Update is superfluous") - } - } - - tsm = schemaMap(r.Schema) - - // Delete and Read are required - if r.Read == nil { - return fmt.Errorf("Read must be implemented") - } - if r.Delete == nil { - return fmt.Errorf("Delete must be implemented") - } - - // If we have an importer, we need to verify the importer. - if r.Importer != nil { - if err := r.Importer.InternalValidate(); err != nil { - return err - } - } - } - - // Resource-specific checks - for k, _ := range tsm { - if isReservedResourceFieldName(k) { - return fmt.Errorf("%s is a reserved field name for a resource", k) - } - } - - return schemaMap(r.Schema).InternalValidate(tsm) -} - -func isReservedResourceFieldName(name string) bool { - for _, reservedName := range config.ReservedResourceFields { - if name == reservedName { - return true - } - } - return false -} - -// Data returns a ResourceData struct for this Resource. Each return value -// is a separate copy and can be safely modified differently. -// -// The data returned from this function has no actual effect on the Resource -// itself (including the state given to this function). -// -// This function is useful for unit tests and ResourceImporter functions.
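Per the comment above, the Data function defined next is handy in tests. A small sketch; resourceExampleThing is the hypothetical resource from the earlier provider sketch:

```go
package example

import "testing"

func TestExampleThingData(t *testing.T) {
	d := resourceExampleThing().Data(nil) // nil state yields a fresh ResourceData
	if err := d.Set("name", "demo"); err != nil {
		t.Fatal(err)
	}
	if got := d.Get("name").(string); got != "demo" {
		t.Fatalf("unexpected name: %q", got)
	}
}
```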
-func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { - result, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - // At the time of writing, this isn't possible (Data never returns - // non-nil errors). We panic to find this in the future if we have to. - // I don't see a reason for Data to ever return an error. - panic(err) - } - - // Set the schema version to latest by default - result.meta = map[string]interface{}{ - "schema_version": strconv.Itoa(r.SchemaVersion), - } - - return result -} - -// TestResourceData yields a ResourceData filled with this resource's schema for use in unit testing -// -// TODO: May be able to be removed with the above ResourceData function. -func (r *Resource) TestResourceData() *ResourceData { - return &ResourceData{ - schema: r.Schema, - } -} - -// Returns true if the resource is "top level" i.e. not a sub-resource. -func (r *Resource) isTopLevel() bool { - // TODO: This is a heuristic; replace with a definitive attribute? - return r.Create != nil -} - -// Determines if a given InstanceState needs to be migrated by checking the -// stored version number with the current SchemaVersion -func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { - // Get the raw interface{} value for the schema version. If it doesn't - // exist or is nil then set it to zero. - raw := is.Meta["schema_version"] - if raw == nil { - raw = "0" - } - - // Try to convert it to a string. If it isn't a string then we pretend - // that it isn't set at all. It should never not be a string unless it - // was manually tampered with. - rawString, ok := raw.(string) - if !ok { - rawString = "0" - } - - stateSchemaVersion, _ := strconv.Atoi(rawString) - return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion -} - -func (r *Resource) recordCurrentSchemaVersion( - state *terraform.InstanceState) *terraform.InstanceState { - if state != nil && r.SchemaVersion > 0 { - if state.Meta == nil { - state.Meta = make(map[string]interface{}) - } - state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) - } - return state -} - -// Noop is a convenience implementation of a resource function which takes -// no action and returns no error. -func Noop(*ResourceData, interface{}) error { - return nil -} - -// RemoveFromState is a convenience implementation of a resource function -// which sets the resource ID to empty string (to remove it from state) -// and returns no error. -func RemoveFromState(d *ResourceData, _ interface{}) error { - d.SetId("") - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go deleted file mode 100644 index 15aa0b5..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ /dev/null @@ -1,518 +0,0 @@ -package schema - -import ( - "log" - "reflect" - "strings" - "sync" - "time" - - "github.com/hashicorp/terraform/terraform" -) - -// ResourceData is used to query and set the attributes of a resource. -// -// ResourceData is the primary argument received for CRUD operations on -// a resource as well as configuration of a provider. It is a powerful -// structure that can be used to not only query data, but check for changes, -// define partial state updates, etc. -// -// The most relevant methods to take a look at are Get, Set, and Partial.
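The ResourceData struct defined next is the workhorse of every CRUD function. A sketch of a Create implementation, with a hypothetical API client assumed to be stored in meta by the provider's ConfigureFunc:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// ExampleClient stands in for a real API client.
type ExampleClient struct{}

func (c *ExampleClient) CreateThing(name string) (string, error) {
	return "thing-" + name, nil
}

func resourceExampleThingCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ExampleClient) // set by the provider's ConfigureFunc
	id, err := client.CreateThing(d.Get("name").(string))
	if err != nil {
		return err
	}
	d.SetId(id) // a non-empty ID marks the resource as existing in state
	return nil
}
```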
-type ResourceData struct { - // Settable (internally) - schema map[string]*Schema - config *terraform.ResourceConfig - state *terraform.InstanceState - diff *terraform.InstanceDiff - meta map[string]interface{} - timeouts *ResourceTimeout - - // Don't set - multiReader *MultiLevelFieldReader - setWriter *MapFieldWriter - newState *terraform.InstanceState - partial bool - partialMap map[string]struct{} - once sync.Once - isNew bool -} - -// getResult is the internal structure that is generated when a Get -// is called that contains some extra data that might be used. -type getResult struct { - Value interface{} - ValueProcessed interface{} - Computed bool - Exists bool - Schema *Schema -} - -// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary -// values, bypassing schema. This MUST NOT be used in normal circumstances - -// it exists only to support the remote_state data source. -func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { - d.once.Do(d.init) - - d.setWriter.unsafeWriteField(key, value) -} - -// Get returns the data for the given key, or nil if the key doesn't exist -// in the schema. -// -// If the key does exist in the schema but doesn't exist in the configuration, -// then the default value for that type will be returned. For strings, this is -// "", for numbers it is 0, etc. -// -// If you want to test if something is set at all in the configuration, -// use GetOk. -func (d *ResourceData) Get(key string) interface{} { - v, _ := d.GetOk(key) - return v -} - -// GetChange returns the old and new value for a given key. -// -// HasChange should be used to check if a change exists. It is possible -// that both the old and new value are the same if the old value was not -// set and the new value is. This is common, for example, for boolean -// fields which have a zero value of false. -func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { - o, n := d.getChange(key, getSourceState, getSourceDiff) - return o.Value, n.Value -} - -// GetOk returns the data for the given key and whether or not the key -// has been set to a non-zero value at some point. -// -// The first result will not necessarily be nil if the value doesn't exist. -// The second result should be checked to determine this information. -func (d *ResourceData) GetOk(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - if exists { - // If it exists, we also want to verify it is not the zero-value. - value := r.Value - zero := r.Schema.Type.Zero() - - if eq, ok := value.(Equal); ok { - exists = !eq.Equal(zero) - } else { - exists = !reflect.DeepEqual(value, zero) - } - } - - return r.Value, exists -} - -// GetOkExists returns the data for a given key and whether or not the key -// has been set to a non-zero value. This is only useful for determining -// if boolean attributes have been set, if they are Optional but do not -// have a Default value. -// -// This is nearly the same function as GetOk, yet it does not check -// for the zero value of the attribute's type. This allows for attributes -// without a default, to fully check for a literal assignment, regardless -// of the zero-value for that type. -// This should only be used if absolutely required/needed.
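The distinction matters most for Optional booleans: under GetOk, a configured false is indistinguishable from unset. A sketch, assuming a hypothetical Optional "enabled" attribute:

```go
package example

import (
	"log"

	"github.com/hashicorp/terraform/helper/schema"
)

func readEnabled(d *schema.ResourceData) {
	// GetOk: ok is false when "enabled" is unset OR set to the zero value false.
	if v, ok := d.GetOk("enabled"); ok {
		log.Printf("enabled is true: %v", v)
	}
	// GetOkExists: ok is true whenever "enabled" was literally assigned,
	// even if it was assigned false.
	if v, ok := d.GetOkExists("enabled"); ok {
		log.Printf("enabled was explicitly configured: %v", v)
	}
}
```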
-func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - return r.Value, exists -} - -func (d *ResourceData) getRaw(key string, level getSource) getResult { - var parts []string - if key != "" { - parts = strings.Split(key, ".") - } - - return d.get(parts, level) -} - -// HasChange returns whether or not the given key has been changed. -func (d *ResourceData) HasChange(key string) bool { - o, n := d.GetChange(key) - - // If the type implements the Equal interface, then call that - // instead of just doing a reflect.DeepEqual. An example where this is - // needed is *Set - if eq, ok := o.(Equal); ok { - return !eq.Equal(n) - } - - return !reflect.DeepEqual(o, n) -} - -// Partial turns partial state mode on/off. - // - // When partial state mode is enabled, then only key prefixes specified - // by SetPartial will be in the final state. This allows providers to return - // partial states for partially applied resources (when errors occur). -func (d *ResourceData) Partial(on bool) { - d.partial = on - if on { - if d.partialMap == nil { - d.partialMap = make(map[string]struct{}) - } - } else { - d.partialMap = nil - } -} - -// Set sets the value for the given key. -// -// If the key is invalid or the value is not a correct type, an error -// will be returned. -func (d *ResourceData) Set(key string, value interface{}) error { - d.once.Do(d.init) - - // If the value is a pointer to a non-struct, get its value and - // use that. This allows Set to take a pointer to primitives to - // simplify the interface. - reflectVal := reflect.ValueOf(value) - if reflectVal.Kind() == reflect.Ptr { - if reflectVal.IsNil() { - // If the pointer is nil, then the value is just nil - value = nil - } else { - // Otherwise, we dereference the pointer as long as it's not - // a pointer to a struct, since struct pointers are allowed. - reflectVal = reflect.Indirect(reflectVal) - if reflectVal.Kind() != reflect.Struct { - value = reflectVal.Interface() - } - } - } - - return d.setWriter.WriteField(strings.Split(key, "."), value) -} - -// SetPartial adds the key to the final state output while -// in partial state mode. The key must be a root key in the schema (i.e. -// it cannot be "list.0"). -// -// If partial state mode is disabled, then this has no effect. Additionally, -// whenever partial state mode is toggled, the partial data is cleared. -func (d *ResourceData) SetPartial(k string) { - if d.partial { - d.partialMap[k] = struct{}{} - } -} - -func (d *ResourceData) MarkNewResource() { - d.isNew = true -} - -func (d *ResourceData) IsNewResource() bool { - return d.isNew -} - -// Id returns the ID of the resource. -func (d *ResourceData) Id() string { - var result string - - if d.state != nil { - result = d.state.ID - } - - if d.newState != nil { - result = d.newState.ID - } - - return result -} - -// ConnInfo returns the connection info for this resource. -func (d *ResourceData) ConnInfo() map[string]string { - if d.newState != nil { - return d.newState.Ephemeral.ConnInfo - } - - if d.state != nil { - return d.state.Ephemeral.ConnInfo - } - - return nil -} - -// SetId sets the ID of the resource. If the value is blank, then the -// resource is destroyed. -func (d *ResourceData) SetId(v string) { - d.once.Do(d.init) - d.newState.ID = v -} - -// SetConnInfo sets the connection info for a resource.
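Partial and SetPartial above combine into the usual pattern for multi-step updates: flip partial mode on, then persist each key only after its API call succeeds. A sketch with hypothetical helpers and a schema assumed to define "name" and "tags" attributes:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// updateName and updateTags stand in for real API calls.
func updateName(id, name string) error                        { return nil }
func updateTags(id string, tags map[string]interface{}) error { return nil }

func resourceExampleThingUpdate(d *schema.ResourceData, meta interface{}) error {
	d.Partial(true)

	if d.HasChange("name") {
		if err := updateName(d.Id(), d.Get("name").(string)); err != nil {
			return err // only keys passed to SetPartial are persisted
		}
		d.SetPartial("name")
	}
	if d.HasChange("tags") {
		if err := updateTags(d.Id(), d.Get("tags").(map[string]interface{})); err != nil {
			return err
		}
		d.SetPartial("tags")
	}

	d.Partial(false) // success: persist the full new state
	return nil
}
```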
-func (d *ResourceData) SetConnInfo(v map[string]string) { - d.once.Do(d.init) - d.newState.Ephemeral.ConnInfo = v -} - -// SetType sets the ephemeral type for the data. This is only required -// for importing. -func (d *ResourceData) SetType(t string) { - d.once.Do(d.init) - d.newState.Ephemeral.Type = t -} - -// State returns the new InstanceState after the diff and any Set -// calls. -func (d *ResourceData) State() *terraform.InstanceState { - var result terraform.InstanceState - result.ID = d.Id() - result.Meta = d.meta - - // If we have no ID, then this resource doesn't exist and we just - // return nil. - if result.ID == "" { - return nil - } - - if d.timeouts != nil { - if err := d.timeouts.StateEncode(&result); err != nil { - log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) - } - } - - // Look for a magic key in the schema that determines we skip the - // integrity check of fields existing in the schema, allowing dynamic - // keys to be created. - hasDynamicAttributes := false - for k, _ := range d.schema { - if k == "__has_dynamic_attributes" { - hasDynamicAttributes = true - log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) - } - } - - // In order to build the final state attributes, we read the full - // attribute set as a map[string]interface{}, write it to a MapFieldWriter, - // and then use that map. - rawMap := make(map[string]interface{}) - for k := range d.schema { - source := getSourceSet - if d.partial { - source = getSourceState - if _, ok := d.partialMap[k]; ok { - source = getSourceSet - } - } - - raw := d.get([]string{k}, source) - if raw.Exists && !raw.Computed { - rawMap[k] = raw.Value - if raw.ValueProcessed != nil { - rawMap[k] = raw.ValueProcessed - } - } - } - - mapW := &MapFieldWriter{Schema: d.schema} - if err := mapW.WriteField(nil, rawMap); err != nil { - return nil - } - - result.Attributes = mapW.Map() - - if hasDynamicAttributes { - // If we have dynamic attributes, just copy the attributes map - // one for one into the result attributes. - for k, v := range d.setWriter.Map() { - // Don't clobber schema values. This limits usage of dynamic - // attributes to names which _do not_ conflict with schema - // keys! - if _, ok := result.Attributes[k]; !ok { - result.Attributes[k] = v - } - } - } - - if d.newState != nil { - result.Ephemeral = d.newState.Ephemeral - } - - // TODO: This is hacky and we can remove this when we have a proper - // state writer. We should instead have a proper StateFieldWriter - // and use that. - for k, schema := range d.schema { - if schema.Type != TypeMap { - continue - } - - if result.Attributes[k] == "" { - delete(result.Attributes, k) - } - } - - if v := d.Id(); v != "" { - result.Attributes["id"] = d.Id() - } - - if d.state != nil { - result.Tainted = d.state.Tainted - } - - return &result -} - -// Timeout returns the data for the given timeout key -// Returns a duration of 20 minutes for any key not found, or not found and no default. 
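A sketch of declaring Timeouts on a resource and reading them back with the Timeout getter defined next; the resource, its "size" attribute, and waitForReady are hypothetical:

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

// waitForReady stands in for polling a real API until the object settles.
func waitForReady(id string, timeout time.Duration) error { return nil }

func resourceSlowThing() *schema.Resource {
	return &schema.Resource{
		Create: resourceSlowThingCreate,
		Read:   schema.Noop,
		Delete: schema.RemoveFromState,
		Timeouts: &schema.ResourceTimeout{
			Create:  schema.DefaultTimeout(45 * time.Minute),
			Default: schema.DefaultTimeout(10 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			"size": {Type: schema.TypeInt, Required: true, ForceNew: true},
		},
	}
}

func resourceSlowThingCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId("slow-thing")
	// Falls back to the Default timeout, then to the built-in 20 minutes,
	// when no create timeout was configured.
	return waitForReady(d.Id(), d.Timeout(schema.TimeoutCreate))
}
```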
-func (d *ResourceData) Timeout(key string) time.Duration { - key = strings.ToLower(key) - - var timeout *time.Duration - switch key { - case TimeoutCreate: - timeout = d.timeouts.Create - case TimeoutRead: - timeout = d.timeouts.Read - case TimeoutUpdate: - timeout = d.timeouts.Update - case TimeoutDelete: - timeout = d.timeouts.Delete - } - - if timeout != nil { - return *timeout - } - - if d.timeouts.Default != nil { - return *d.timeouts.Default - } - - // Return system default of 20 minutes - return 20 * time.Minute -} - -func (d *ResourceData) init() { - // Initialize the field that will store our new state - var copyState terraform.InstanceState - if d.state != nil { - copyState = *d.state.DeepCopy() - } - d.newState = ©State - - // Initialize the map for storing set data - d.setWriter = &MapFieldWriter{Schema: d.schema} - - // Initialize the reader for getting data from the - // underlying sources (config, diff, etc.) - readers := make(map[string]FieldReader) - var stateAttributes map[string]string - if d.state != nil { - stateAttributes = d.state.Attributes - readers["state"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(stateAttributes), - } - } - if d.config != nil { - readers["config"] = &ConfigFieldReader{ - Schema: d.schema, - Config: d.config, - } - } - if d.diff != nil { - readers["diff"] = &DiffFieldReader{ - Schema: d.schema, - Diff: d.diff, - Source: &MultiLevelFieldReader{ - Levels: []string{"state", "config"}, - Readers: readers, - }, - } - } - readers["set"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(d.setWriter.Map()), - } - d.multiReader = &MultiLevelFieldReader{ - Levels: []string{ - "state", - "config", - "diff", - "set", - }, - - Readers: readers, - } -} - -func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool) { - // Get the change between the state and the config. - o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) - if !o.Exists { - o.Value = nil - } - if !n.Exists { - n.Value = nil - } - - // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed -} - -func (d *ResourceData) getChange( - k string, - oldLevel getSource, - newLevel getSource) (getResult, getResult) { - var parts, parts2 []string - if k != "" { - parts = strings.Split(k, ".") - parts2 = strings.Split(k, ".") - } - - o := d.get(parts, oldLevel) - n := d.get(parts2, newLevel) - return o, n -} - -func (d *ResourceData) get(addr []string, source getSource) getResult { - d.once.Do(d.init) - - level := "set" - flags := source & ^getSourceLevelMask - exact := flags&getSourceExact != 0 - source = source & getSourceLevelMask - if source >= getSourceSet { - level = "set" - } else if source >= getSourceDiff { - level = "diff" - } else if source >= getSourceConfig { - level = "config" - } else { - level = "state" - } - - var result FieldReadResult - var err error - if exact { - result, err = d.multiReader.ReadFieldExact(addr, level) - } else { - result, err = d.multiReader.ReadFieldMerge(addr, level) - } - if err != nil { - panic(err) - } - - // If the result doesn't exist, then we set the value to the zero value - var schema *Schema - if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { - schema = schemaL[len(schemaL)-1] - } - - if result.Value == nil && schema != nil { - result.Value = result.ValueOrZero(schema) - } - - // Transform the FieldReadResult into a getResult. It might be worth - // merging these two structures one day. 
- return getResult{ - Value: result.Value, - ValueProcessed: result.ValueProcessed, - Computed: result.Computed, - Exists: result.Exists, - Schema: schema, - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go deleted file mode 100644 index 7dd655d..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go +++ /dev/null @@ -1,17 +0,0 @@ -package schema - -//go:generate stringer -type=getSource resource_data_get_source.go - -// getSource represents the level we want to get for a value (internally). -// Any source less than or equal to the level will be loaded (whichever -// has a value first). -type getSource byte - -const ( - getSourceState getSource = 1 << iota - getSourceConfig - getSourceDiff - getSourceSet - getSourceExact // Only get from the _exact_ level - getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet -) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go deleted file mode 100644 index 5dada3c..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go +++ /dev/null @@ -1,52 +0,0 @@ -package schema - -// ResourceImporter defines how a resource is imported in Terraform. This -// can be set onto a Resource struct to make it Importable. Not all resources -// have to be importable; if a Resource doesn't have a ResourceImporter then -// it won't be importable. -// -// "Importing" in Terraform is the process of taking an already-created -// resource and bringing it under Terraform management. This can include -// updating Terraform state, generating Terraform configuration, etc. -type ResourceImporter struct { - // The functions below must all be implemented for importing to work. - - // State is called to convert an ID to one or more InstanceState to - // insert into the Terraform state. If this isn't specified, then - // the ID is passed straight through. - State StateFunc -} - -// StateFunc is the function called to import a resource into the -// Terraform state. It is given a ResourceData with only ID set. This -// ID is going to be an arbitrary value given by the user and may not map -// directly to the ID format that the resource expects, so that should -// be validated. -// -// This should return a slice of ResourceData that turn into the state -// that was imported. This might be as simple as returning only the argument -// that was given to the function. In other cases (such as AWS security groups), -// an import may fan out to multiple resources and this will have to return -// multiple. -// -// To create the ResourceData structures for other resource types (if -// you have to), instantiate your resource and call the Data function. -type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) - -// InternalValidate should be called to validate the structure of this -// importer. This should be called in a unit test. -// -// Resource.InternalValidate() will automatically call this, so this doesn't -// need to be called manually. Further, Resource.InternalValidate() is -// automatically called by Provider.InternalValidate(), so you only need -// to internal validate the provider. 
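Putting the import pieces together: a resource opts into import by attaching a ResourceImporter, and for ID-only refreshes the passthrough defined just below is enough. A sketch with a hypothetical resource:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

func resourceImportableThing() *schema.Resource {
	return &schema.Resource{
		Create: schema.Noop,
		Read:   schema.Noop, // must be able to fill state from just the ID
		Delete: schema.RemoveFromState,
		Importer: &schema.ResourceImporter{
			// ImportStatePassthrough hands the user-supplied ID straight
			// through as a single ResourceData.
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Optional: true, ForceNew: true},
		},
	}
}
```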
-func (r *ResourceImporter) InternalValidate() error { - return nil -} - -// ImportStatePassthrough is an implementation of StateFunc that can be -// used to simply pass the ID directly through. This should be used only -// in the case that an ID-only refresh is possible. -func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { - return []*ResourceData{d}, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go deleted file mode 100644 index 445819f..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go +++ /dev/null @@ -1,237 +0,0 @@ -package schema - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/copystructure" -) - -const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" -const TimeoutsConfigKey = "timeouts" - -const ( - TimeoutCreate = "create" - TimeoutRead = "read" - TimeoutUpdate = "update" - TimeoutDelete = "delete" - TimeoutDefault = "default" -) - -func timeoutKeys() []string { - return []string{ - TimeoutCreate, - TimeoutRead, - TimeoutUpdate, - TimeoutDelete, - TimeoutDefault, - } -} - -// could be time.Duration, int64 or float64 -func DefaultTimeout(tx interface{}) *time.Duration { - var td time.Duration - switch raw := tx.(type) { - case time.Duration: - return &raw - case int64: - td = time.Duration(raw) - case float64: - td = time.Duration(int64(raw)) - default: - log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx) - } - return &td -} - -type ResourceTimeout struct { - Create, Read, Update, Delete, Default *time.Duration -} - -// ConfigDecode takes a schema and the configuration (available in Diff) and -// validates and parses the timeouts into `t` -func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error { - if s.Timeouts != nil { - raw, err := copystructure.Copy(s.Timeouts) - if err != nil { - log.Printf("[DEBUG] Error with deep copy: %s", err) - } - *t = *raw.(*ResourceTimeout) - } - - if raw, ok := c.Config[TimeoutsConfigKey]; ok { - if configTimeouts, ok := raw.([]map[string]interface{}); ok { - for _, timeoutValues := range configTimeouts { - // loop through each Timeout given in the configuration and validate it - // against the Timeouts defined in the resource - for timeKey, timeValue := range timeoutValues { - // validate that we're dealing with the normal CRUD actions - var found bool - for _, key := range timeoutKeys() { - if timeKey == key { - found = true - break - } - } - - if !found { - return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) - } - - // Get timeout - rt, err := time.ParseDuration(timeValue.(string)) - if err != nil { - return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err) - } - - var timeout *time.Duration - switch timeKey { - case TimeoutCreate: - timeout = t.Create - case TimeoutUpdate: - timeout = t.Update - case TimeoutRead: - timeout = t.Read - case TimeoutDelete: - timeout = t.Delete - case TimeoutDefault: - timeout = t.Default - } - - // If the resource has not declared this in the definition, then error - // with an unsupported message - if timeout == nil { - return unsupportedTimeoutKeyError(timeKey) - } - - *timeout = rt - } - } - } else { - log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts") - } - } - - return nil -} - -func unsupportedTimeoutKeyError(key string) error { - return fmt.Errorf("Timeout Key (%s) is not
supported", key) -} - -// DiffEncode, StateEncode, and MetaDecode are analogous to the Go stdlib JSONEncoder -// interface: they encode/decode a timeouts struct from an instance diff, which is -// where the timeout data is stored after a diff to pass into Apply. -// -// StateEncode encodes the timeout into the ResourceData's InstanceState for -// saving to state -// -func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error { - return t.metaEncode(id) -} - -func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error { - return t.metaEncode(is) -} - -// metaEncode encodes the ResourceTimeout into a map[string]interface{} format -// and stores it in the Meta field of the interface it's given. -// Assumes the interface is either *terraform.InstanceState or -// *terraform.InstanceDiff, returns an error otherwise -func (t *ResourceTimeout) metaEncode(ids interface{}) error { - m := make(map[string]interface{}) - - if t.Create != nil { - m[TimeoutCreate] = t.Create.Nanoseconds() - } - if t.Read != nil { - m[TimeoutRead] = t.Read.Nanoseconds() - } - if t.Update != nil { - m[TimeoutUpdate] = t.Update.Nanoseconds() - } - if t.Delete != nil { - m[TimeoutDelete] = t.Delete.Nanoseconds() - } - if t.Default != nil { - m[TimeoutDefault] = t.Default.Nanoseconds() - // for any key above that is nil, if default is specified, we need to - // populate it with the default - for _, k := range timeoutKeys() { - if _, ok := m[k]; !ok { - m[k] = t.Default.Nanoseconds() - } - } - } - - // only add the Timeout to the Meta if we have values - if len(m) > 0 { - switch instance := ids.(type) { - case *terraform.InstanceDiff: - if instance.Meta == nil { - instance.Meta = make(map[string]interface{}) - } - instance.Meta[TimeoutKey] = m - case *terraform.InstanceState: - if instance.Meta == nil { - instance.Meta = make(map[string]interface{}) - } - instance.Meta[TimeoutKey] = m - default: - return fmt.Errorf("Error matching type for Diff Encode") - } - } - - return nil -} - -func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error { - return t.metaDecode(id) -} -func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error { - return t.metaDecode(is) -} - -func (t *ResourceTimeout) metaDecode(ids interface{}) error { - var rawMeta interface{} - var ok bool - switch rawInstance := ids.(type) { - case *terraform.InstanceDiff: - rawMeta, ok = rawInstance.Meta[TimeoutKey] - if !ok { - return nil - } - case *terraform.InstanceState: - rawMeta, ok = rawInstance.Meta[TimeoutKey] - if !ok { - return nil - } - default: - return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids) - } - - times := rawMeta.(map[string]interface{}) - if len(times) == 0 { - return nil - } - - if v, ok := times[TimeoutCreate]; ok { - t.Create = DefaultTimeout(v) - } - if v, ok := times[TimeoutRead]; ok { - t.Read = DefaultTimeout(v) - } - if v, ok := times[TimeoutUpdate]; ok { - t.Update = DefaultTimeout(v) - } - if v, ok := times[TimeoutDelete]; ok { - t.Delete = DefaultTimeout(v) - } - if v, ok := times[TimeoutDefault]; ok { - t.Default = DefaultTimeout(v) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go deleted file mode 100644 index acb5618..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ /dev/null @@ -1,1549 +0,0 @@ -// schema is a high-level framework for easily writing new providers -// for Terraform. 
Usage of schema is recommended over attempting to write -// to the low-level plugin interfaces manually. -// -// schema breaks down provider creation into simple CRUD operations for -// resources. The logic of diffing, destroying before creating, updating -// or creating, etc. is all handled by the framework. The plugin author -// only needs to implement a configuration schema and the CRUD operations and -// everything else is meant to just work. -// -// A good starting point is to view the Provider structure. -package schema - -import ( - "fmt" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" -) - -// type used for schema package context keys -type contextKey string - -// Schema is used to describe the structure of a value. -// -// Read the documentation of the struct elements for important details. -type Schema struct { - // Type is the type of the value and must be one of the ValueType values. - // - // This type not only determines what type is expected/valid in configuring - // this value, but also what type is returned when ResourceData.Get is - // called. The types returned by Get are: - // - // TypeBool - bool - // TypeInt - int - // TypeFloat - float64 - // TypeString - string - // TypeList - []interface{} - // TypeMap - map[string]interface{} - // TypeSet - *schema.Set - // - Type ValueType - - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. - // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. - Optional bool - Required bool - - // If this is non-nil, the provided function will be used during diff - // of this field. If this is nil, a default diff for the type of the - // schema will be used. - // - // This allows comparison based on something other than primitive, list - // or map equality - for example SSH public keys may be considered - // equivalent regardless of trailing whitespace. - DiffSuppressFunc SchemaDiffSuppressFunc - - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. - // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. - // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. - // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. - // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} - DefaultFunc SchemaDefaultFunc - - // Description is used as the description for docs or asking for user - // input. 
It should be relatively short (a few sentences max) and should - // be formatted to fit a CLI. - Description string - - // InputDefault is the default value to use for when inputs are requested. - // This differs from Default in that if Default is set, no input is - // asked for. If Input is asked, this will be the default value offered. - InputDefault string - - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. - // - // StateFunc is a function called to change the value of this before - // storing it in the state (and likewise before comparing for diffs). - // The use for this is for example with large strings, you may want - // to simply store the hash of it. - Computed bool - ForceNew bool - StateFunc SchemaStateFunc - - // The following fields are only set for a TypeList or TypeSet Type. - // - // Elem must be either a *Schema or a *Resource only if the Type is - // TypeList, and represents what the element type is. If it is *Schema, - // the element type is just a simple value. If it is *Resource, the - // element type is a complex structure, potentially with its own lifecycle. - // - // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. - // - // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. - // - // PromoteSingle, if true, will allow single elements to be standalone - // and promote them to a list. For example "foo" would be promoted to - // ["foo"] automatically. This is primarily for legacy reasons and the - // ambiguity is not recommended for new usage. Promotion is only allowed - // for primitive element types. - Elem interface{} - MaxItems int - MinItems int - PromoteSingle bool - - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. - Set SchemaSetFunc - - // ComputedWhen is a set of queries on the configuration. Whenever any - // of these things is changed, it will require a recompute (this requires - // that Computed is set to true). - // - // NOTE: This currently does not work. - ComputedWhen []string - - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - ConflictsWith []string - - // When Deprecated is set, this attribute is deprecated. - // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. - Deprecated string - - // When Removed is set, this attribute has been removed from the schema - // - // Removed attributes can be left in the Schema to generate informative error - // messages for the user when they show up in resource configurations. 
- // This string is the message shown to the user with instructions on - // what to do about the removed attribute. - Removed string - - // ValidateFunc allows individual fields to define arbitrary validation - // logic. It is yielded the provided config value as an interface{} that is - // guaranteed to be of the proper Schema type, and it can yield warnings or - // errors based on inspection of that value. - // - // ValidateFunc currently only works for primitive types. - ValidateFunc SchemaValidateFunc - - // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. - Sensitive bool -} - -// SchemaDiffSuppressFunc is a function which can be used to determine -// whether a detected diff on a schema element is "valid" or not, and -// suppress it from the plan if necessary. -// -// Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool - -// SchemaDefaultFunc is a function called to return a default value for -// a field. -type SchemaDefaultFunc func() (interface{}, error) - -// EnvDefaultFunc is a helper function that returns the value of the -// given environment variable, if one exists, or the default value -// otherwise. -func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return dv, nil - } -} - -// MultiEnvDefaultFunc is a helper function that returns the value of the first -// environment variable in the given list that returns a non-empty value. If -// none of the environment variables return a value, the default value is -// returned. -func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v, nil - } - } - return dv, nil - } -} - -// SchemaSetFunc is a function that must return a unique ID for the given -// element. This unique ID is used to store the element in a hash. -type SchemaSetFunc func(interface{}) int - -// SchemaStateFunc is a function used to convert some type to a string -// to be stored in the state. -type SchemaStateFunc func(interface{}) string - -// SchemaValidateFunc is a function used to validate a single field in the -// schema. -type SchemaValidateFunc func(interface{}, string) ([]string, []error) - -func (s *Schema) GoString() string { - return fmt.Sprintf("*%#v", *s) -} - -// Returns a default value for this schema by either reading Default or -// evaluating DefaultFunc. If neither of these are defined, returns nil. -func (s *Schema) DefaultValue() (interface{}, error) { - if s.Default != nil { - return s.Default, nil - } - - if s.DefaultFunc != nil { - defaultValue, err := s.DefaultFunc() - if err != nil { - return nil, fmt.Errorf("error loading default: %s", err) - } - return defaultValue, nil - } - - return nil, nil -} - -// Returns a zero value for the schema. -func (s *Schema) ZeroValue() interface{} { - // If it's a set then we'll do a bit of extra work to provide the - // right hashing function in our empty value.
- if s.Type == TypeSet { - setFunc := s.Set - if setFunc == nil { - // Default set function uses the schema to hash the whole value - elem := s.Elem - switch t := elem.(type) { - case *Schema: - setFunc = HashSchema(t) - case *Resource: - setFunc = HashResource(t) - default: - panic("invalid set element type") - } - } - return &Set{F: setFunc} - } else { - return s.Type.Zero() - } -} - -func (s *Schema) finalizeDiff( - d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { - if d == nil { - return d - } - - if s.Type == TypeBool { - normalizeBoolString := func(s string) string { - switch s { - case "0": - return "false" - case "1": - return "true" - } - return s - } - d.Old = normalizeBoolString(d.Old) - d.New = normalizeBoolString(d.New) - } - - if s.Computed && !d.NewRemoved && d.New == "" { - // Computed attribute without a new value set - d.NewComputed = true - } - - if s.ForceNew { - // ForceNew, mark that this field is requiring new under the - // following conditions, explained below: - // - // * Old != New - There is a change in value. This field - // is therefore causing a new resource. - // - // * NewComputed - This field is being computed, hence a - // potential change in value, mark as causing a new resource. - d.RequiresNew = d.Old != d.New || d.NewComputed - } - - if d.NewRemoved { - return d - } - - if s.Computed { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil - } - - if d.New == "" { - // Computed attribute without a new value set - d.NewComputed = true - } - } - - if s.Sensitive { - // Set the Sensitive flag so output is hidden in the UI - d.Sensitive = true - } - - return d -} - -// schemaMap is a wrapper that adds nice functions on top of schemas. -type schemaMap map[string]*Schema - -// Data returns a ResourceData for the given schema, state, and diff. -// -// The diff is optional. -func (m schemaMap) Data( - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*ResourceData, error) { - return &ResourceData{ - schema: m, - state: s, - diff: d, - }, nil -} - -// Diff returns the diff for a resource given the schema map, -// state, and configuration. -func (m schemaMap) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - result := new(terraform.InstanceDiff) - result.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Make sure to mark if the resource is tainted - if s != nil { - result.DestroyTainted = s.Tainted - } - - d := &ResourceData{ - schema: m, - state: s, - config: c, - } - - for k, schema := range m { - err := m.diff(k, schema, result, d, false) - if err != nil { - return nil, err - } - } - - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted - - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. 
- d.state = nil - d.init() - - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) - if err != nil { - return nil, err - } - } - - // Force all the fields to not force a new since we know what we - // want to force new. - for k, attr := range result2.Attributes { - if attr == nil { - continue - } - - if attr.RequiresNew { - attr.RequiresNew = false - } - - if s != nil { - attr.Old = s.Attributes[k] - } - } - - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue - } - - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr - } - - if attr.RequiresNew { - newAttr.RequiresNew = true - } - - result2.Attributes[k] = newAttr - } - - // And set the diff! - result = result2 - } - - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) - } - } - - // Go through and detect all of the ComputedWhens now that we've - // finished the diff. - // TODO - - if result.Empty() { - // If we don't have any diff elements, just return nil - return nil, nil - } - - return result, nil -} - -// Input implements the terraform.ResourceProvider method by asking -// for input for required configuration keys that don't have a value. -func (m schemaMap) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - keys := make([]string, 0, len(m)) - for k, _ := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := m[k] - - // Skip things that don't require config, if that is even valid - // for a provider schema. - // Required XOR Optional must always be true to validate, so we only - // need to check one. - if v.Optional { - continue - } - - // Deprecated fields should never prompt - if v.Deprecated != "" { - continue - } - - // Skip things that have a value of some sort already - if _, ok := c.Raw[k]; ok { - continue - } - - // Skip if it has a default value - defaultValue, err := v.DefaultValue() - if err != nil { - return nil, fmt.Errorf("%s: error loading default: %s", k, err) - } - if defaultValue != nil { - continue - } - - var value interface{} - switch v.Type { - case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: - continue - case TypeString: - value, err = m.inputString(input, k, v) - default: - panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) - } - - if err != nil { - return nil, fmt.Errorf( - "%s: %s", k, err) - } - - c.Config[k] = value - } - - return c, nil -} - -// Validate validates the configuration against this schema mapping. -func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return m.validateObject("", m, c) -} - -// InternalValidate validates the format of this schema. This should be called -// from a unit test (and not in user-path code) to verify that a schema -// is properly built. 
-func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { - if topSchemaMap == nil { - topSchemaMap = m - } - for k, v := range m { - if v.Type == TypeInvalid { - return fmt.Errorf("%s: Type must be specified", k) - } - - if v.Optional && v.Required { - return fmt.Errorf("%s: Optional or Required must be set, not both", k) - } - - if v.Required && v.Computed { - return fmt.Errorf("%s: Cannot be both Required and Computed", k) - } - - if !v.Required && !v.Optional && !v.Computed { - return fmt.Errorf("%s: One of optional, required, or computed must be set", k) - } - - if v.Computed && v.Default != nil { - return fmt.Errorf("%s: Default must be nil if computed", k) - } - - if v.Required && v.Default != nil { - return fmt.Errorf("%s: Default cannot be set with Required", k) - } - - if len(v.ComputedWhen) > 0 && !v.Computed { - return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) - } - - if len(v.ConflictsWith) > 0 && v.Required { - return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) - } - - if len(v.ConflictsWith) > 0 { - for _, key := range v.ConflictsWith { - parts := strings.Split(key, ".") - sm := topSchemaMap - var target *Schema - for _, part := range parts { - // Skip index fields - if _, err := strconv.Atoi(part); err == nil { - continue - } - - var ok bool - if target, ok = sm[part]; !ok { - return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key) - } - - if subResource, ok := target.Elem.(*Resource); ok { - sm = schemaMap(subResource.Schema) - } - } - if target == nil { - return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) - } - if target.Required { - return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) - } - - if len(target.ComputedWhen) > 0 { - return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) - } - } - } - - if v.Type == TypeList || v.Type == TypeSet { - if v.Elem == nil { - return fmt.Errorf("%s: Elem must be set for lists", k) - } - - if v.Default != nil { - return fmt.Errorf("%s: Default is not valid for lists or sets", k) - } - - if v.Type != TypeSet && v.Set != nil { - return fmt.Errorf("%s: Set can only be set for TypeSet", k) - } - - switch t := v.Elem.(type) { - case *Resource: - if err := t.InternalValidate(topSchemaMap, true); err != nil { - return err - } - case *Schema: - bad := t.Computed || t.Optional || t.Required - if bad { - return fmt.Errorf( - "%s: Elem must have only Type set", k) - } - } - } else { - if v.MaxItems > 0 || v.MinItems > 0 { - return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) - } - } - - // Computed-only field - if v.Computed && !v.Optional { - if v.ValidateFunc != nil { - return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ - "there's nothing to validate on computed-only field", k) - } - if v.DiffSuppressFunc != nil { - return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ - " between config and state representation. 
"+ - "There is no config for computed-only field, nothing to compare.", k) - } - } - - if v.ValidateFunc != nil { - switch v.Type { - case TypeList, TypeSet: - return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) - } - } - - if v.Deprecated == "" && v.Removed == "" { - if !isValidFieldName(k) { - return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) - } - } - } - - return nil -} - -func isValidFieldName(name string) bool { - re := regexp.MustCompile("^[a-z0-9_]+$") - return re.MatchString(name) -} - -func (m schemaMap) diff( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d *ResourceData, - all bool) error { - - unsupressedDiff := new(terraform.InstanceDiff) - unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - var err error - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - err = m.diffString(k, schema, unsupressedDiff, d, all) - case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) - case TypeMap: - err = m.diffMap(k, schema, unsupressedDiff, d, all) - case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) - default: - err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) - } - - for attrK, attrV := range unsupressedDiff.Attributes { - if schema.DiffSuppressFunc != nil && - attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) { - continue - } - - diff.Attributes[attrK] = attrV - } - - return err -} - -func (m schemaMap) diffList( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d *ResourceData, - all bool) error { - o, n, _, computedList := d.diffChange(k) - if computedList { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedList && schema.Computed { - return nil - } - - if o == nil { - o = []interface{}{} - } - if n == nil { - n = []interface{}{} - } - if s, ok := o.(*Set); ok { - o = s.List() - } - if s, ok := n.(*Set); ok { - n = s.List() - } - os := o.([]interface{}) - vs := n.([]interface{}) - - // If the new value was set, and the two are equal, then we're done. 
- // We have to do this check here because sets might be NOT - // reflect.DeepEqual so we need to wait until we get the []interface{} - if !all && nSet && reflect.DeepEqual(os, vs) { - return nil - } - - // Get the counts - oldLen := len(os) - newLen := len(vs) - oldStr := strconv.FormatInt(int64(oldLen), 10) - - // If the whole list is computed, then say that the # is computed - if computedList { - diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ - Old: oldStr, - NewComputed: true, - RequiresNew: schema.ForceNew, - } - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - computed := oldLen == 0 && newLen == 0 && schema.Computed - if changed || computed || all { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - newStr := "" - if !computed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) - } - - // Figure out the maximum - maxLen := oldLen - if newLen > maxLen { - maxLen = newLen - } - - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for i := 0; i < maxLen; i++ { - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) - if err != nil { - return err - } - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeList). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - for i := 0; i < maxLen; i++ { - subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) - if err != nil { - return err - } - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - - return nil -} - -func (m schemaMap) diffMap( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d *ResourceData, - all bool) error { - prefix := k + "." - - // First get all the values from the state - var stateMap, configMap map[string]string - o, n, _, nComputed := d.diffChange(k) - if err := mapstructure.WeakDecode(o, &stateMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(n, &configMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - // Keep track of whether the state _exists_ at all prior to clearing it - stateExists := o != nil - - // Delete any count values, since we don't use those - delete(configMap, "%") - delete(stateMap, "%") - - // Check if the number of elements has changed. - oldLen, newLen := len(stateMap), len(configMap) - changed := oldLen != newLen - if oldLen != 0 && newLen == 0 && schema.Computed { - changed = false - } - - // It is computed if we have no old value, no new value, the schema - // says it is computed, and it didn't exist in the state before. The - // last point means: if it existed in the state, even empty, then it - // has already been computed. - computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists - - // If the count has changed or we're computed, then add a diff for the - // count. "nComputed" means that the new value _contains_ a value that - // is computed. We don't do granular diffs for this yet, so we mark the - // whole map as computed. 
- if changed || computed || nComputed { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed || nComputed, - ForceNew: schema.ForceNew, - } - - oldStr := strconv.FormatInt(int64(oldLen), 10) - newStr := "" - if !computed && !nComputed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".%"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - ) - } - - // If the new map is nil and we're computed, then ignore it. - if n == nil && schema.Computed { - return nil - } - - // Now we compare, preferring values from the config map - for k, v := range configMap { - old, ok := stateMap[k] - delete(stateMap, k) - - if old == v && ok && !all { - continue - } - - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: old, - New: v, - }) - } - for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }) - } - - return nil -} - -func (m schemaMap) diffSet( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d *ResourceData, - all bool) error { - - o, n, _, computedSet := d.diffChange(k) - if computedSet { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedSet && schema.Computed { - return nil - } - - if o == nil { - o = schema.ZeroValue().(*Set) - } - if n == nil { - n = schema.ZeroValue().(*Set) - } - os := o.(*Set) - ns := n.(*Set) - - // If the new value was set, compare the listCode's to determine if - // the two are equal. Comparing listCode's instead of the actual values - // is needed because there could be computed values in the set which - // would result in false positives while comparing. - if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { - return nil - } - - // Get the counts - oldLen := os.Len() - newLen := ns.Len() - oldStr := strconv.Itoa(oldLen) - newStr := strconv.Itoa(newLen) - - // Build a schema for our count - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - // If the set computed then say that the # is computed - if computedSet || schema.Computed && !nSet { - // If # already exists, equals 0 and no new set is supplied, there - // is nothing to record in the diff - count, ok := d.GetOk(k + ".#") - if ok && count.(int) == 0 && !nSet && !computedSet { - return nil - } - - // Set the count but make sure that if # does not exist, we don't - // use the zeroed value - countStr := strconv.Itoa(count.(int)) - if !ok { - countStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }) - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) - } - - // Build the list of codes that will make up our set. This is the - // removed codes as well as all the codes in the new codes. 
- codes := make([][]string, 2) - codes[0] = os.Difference(ns).listCode() - codes[1] = ns.listCode() - for _, list := range codes { - for _, code := range list { - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) - if err != nil { - return err - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeSet). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) - if err != nil { - return err - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - } - } - - return nil -} - -func (m schemaMap) diffString( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d *ResourceData, - all bool) error { - var originalN interface{} - var os, ns string - o, n, _, computed := d.diffChange(k) - if schema.StateFunc != nil && n != nil { - originalN = n - n = schema.StateFunc(n) - } - nraw := n - if nraw == nil && o != nil { - nraw = schema.Type.Zero() - } - if err := mapstructure.WeakDecode(o, &os); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(nraw, &ns); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - if os == ns && !all { - // They're the same value. If the old value is not blank or we - // have an ID, then return right away since we're already set up. - if os != "" || d.Id() != "" { - return nil - } - - // Otherwise, only continue if we're computed - if !schema.Computed && !computed { - return nil - } - } - - removed := false - if o != nil && n == nil { - removed = true - } - if removed && schema.Computed { - return nil - } - - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }) - - return nil -} - -func (m schemaMap) inputString( - input terraform.UIInput, - k string, - schema *Schema) (interface{}, error) { - result, err := input.Input(&terraform.InputOpts{ - Id: k, - Query: k, - Description: schema.Description, - Default: schema.InputDefault, - }) - - return result, err -} - -func (m schemaMap) validate( - k string, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, ok := c.Get(k) - if !ok && schema.DefaultFunc != nil { - // We have a dynamic default. Check if we have a value.
- var err error - raw, err = schema.DefaultFunc() - if err != nil { - return nil, []error{fmt.Errorf( - "%q, error loading default: %s", k, err)} - } - - // We're okay as long as we had a value set - ok = raw != nil - } - if !ok { - if schema.Required { - return nil, []error{fmt.Errorf( - "%q: required field is not set", k)} - } - - return nil, nil - } - - if !schema.Required && !schema.Optional { - // This is a computed-only field - return nil, []error{fmt.Errorf( - "%q: this field cannot be set", k)} - } - - err := m.validateConflictingAttributes(k, schema, c) - if err != nil { - return nil, []error{err} - } - - return m.validateType(k, raw, schema, c) -} - -func (m schemaMap) validateConflictingAttributes( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.ConflictsWith) == 0 { - return nil - } - - for _, conflicting_key := range schema.ConflictsWith { - if value, ok := c.Get(conflicting_key); ok { - return fmt.Errorf( - "%q: conflicts with %s (%#v)", k, conflicting_key, value) - } - } - - return nil -} - -func (m schemaMap) validateList( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // We use reflection to verify the slice because you can't - // cast to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - - // If we support promotion and the raw value isn't a slice, wrap - // it in []interface{} and check again. - if schema.PromoteSingle && rawV.Kind() != reflect.Slice { - raw = []interface{}{raw} - rawV = reflect.ValueOf(raw) - } - - if rawV.Kind() != reflect.Slice { - return nil, []error{fmt.Errorf( - "%s: should be a list", k)} - } - - // Validate length - if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} - } - - if schema.MinItems > 0 && rawV.Len() < schema.MinItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} - } - - // Now build the []interface{} - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - var ws []string - var es []error - for i, raw := range raws { - key := fmt.Sprintf("%s.%d", k, i) - - // Reify the key value from the ResourceConfig. - // If the list was computed we have all raw values, but some of these - // may be known in the config, and aren't individually marked as Computed. - if r, ok := c.Get(key); ok { - raw = r - } - - var ws2 []string - var es2 []error - switch t := schema.Elem.(type) { - case *Resource: - // This is a sub-resource - ws2, es2 = m.validateObject(key, t.Schema, c) - case *Schema: - ws2, es2 = m.validateType(key, raw, t, c) - } - - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - return ws, es -} - -func (m schemaMap) validateMap( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // We use reflection to verify the slice because you can't - // cast to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - switch rawV.Kind() { - case reflect.String: - // If raw and reified are equal, this is a string and should - // be rejected.
- reified, reifiedOk := c.Get(k) - if reifiedOk && raw == reified && !c.IsComputed(k) { - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - // Otherwise it's likely raw is an interpolation. - return nil, nil - case reflect.Map: - case reflect.Slice: - default: - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - - // If it is not a slice, validate directly - if rawV.Kind() != reflect.Slice { - mapIface := rawV.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(mapIface, k) - } - return nil, nil - } - - // It is a slice, verify that all the elements are maps - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - for _, raw := range raws { - v := reflect.ValueOf(raw) - if v.Kind() != reflect.Map { - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} - } - mapIface := v.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - } - - if schema.ValidateFunc != nil { - validatableMap := make(map[string]interface{}) - for _, raw := range raws { - for k, v := range raw.(map[string]interface{}) { - validatableMap[k] = v - } - } - - return schema.ValidateFunc(validatableMap, k) - } - - return nil, nil -} - -func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { - for key, raw := range m { - valueType, err := getValueType(k, schema) - if err != nil { - return nil, []error{err} - } - - switch valueType { - case TypeBool: - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeInt: - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeFloat: - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeString: - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - } - return nil, nil -} - -func getValueType(k string, schema *Schema) (ValueType, error) { - if schema.Elem == nil { - return TypeString, nil - } - if vt, ok := schema.Elem.(ValueType); ok { - return vt, nil - } - - if s, ok := schema.Elem.(*Schema); ok { - if s.Elem == nil { - return TypeString, nil - } - if vt, ok := s.Elem.(ValueType); ok { - return vt, nil - } - } - - if _, ok := schema.Elem.(*Resource); ok { - // TODO: We don't actually support this (yet) - // but silently pass the validation, until we decide - // how to handle nested structures in maps - return TypeString, nil - } - return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) -} - -func (m schemaMap) validateObject( - k string, - schema map[string]*Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, _ := c.Get(k) - if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { - return nil, []error{fmt.Errorf( - "%s: expected object, got %s", - k, reflect.ValueOf(raw).Kind())} - } - - var ws []string - var es []error - for subK, s := range schema { - key := subK - if k != "" { - key = fmt.Sprintf("%s.%s", k, subK) - } - - ws2, es2 := 
m.validate(key, s, c) - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - // Detect any extra/unknown keys and report those as errors. - if m, ok := raw.(map[string]interface{}); ok { - for subk, _ := range m { - if _, ok := schema[subk]; !ok { - if subk == TimeoutsConfigKey { - continue - } - es = append(es, fmt.Errorf( - "%s: invalid or unknown key: %s", k, subk)) - } - } - } - - return ws, es -} - -func (m schemaMap) validatePrimitive( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - - // Catch if the user gave a complex type where a primitive was - // expected, so we can return a friendly error message that - // doesn't contain Go type system terminology. - switch reflect.ValueOf(raw).Type().Kind() { - case reflect.Slice: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a list", k), - } - case reflect.Map: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a map", k), - } - default: // ok - } - - if c.IsComputed(k) { - // If the key is being computed, then it is not an error as - // long as it's not a slice or map. - return nil, nil - } - - var decoded interface{} - switch schema.Type { - case TypeBool: - // Verify that we can parse this as the correct type - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeInt: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeFloat: - // Verify that we can parse this as a float - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeString: - // Verify that we can parse this as a string - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - - if schema.ValidateFunc != nil { - return schema.ValidateFunc(decoded, k) - } - - return nil, nil -} - -func (m schemaMap) validateType( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - var ws []string - var es []error - switch schema.Type { - case TypeSet, TypeList: - ws, es = m.validateList(k, raw, schema, c) - case TypeMap: - ws, es = m.validateMap(k, raw, schema, c) - default: - ws, es = m.validatePrimitive(k, raw, schema, c) - } - - if schema.Deprecated != "" { - ws = append(ws, fmt.Sprintf( - "%q: [DEPRECATED] %s", k, schema.Deprecated)) - } - - if schema.Removed != "" { - es = append(es, fmt.Errorf( - "%q: [REMOVED] %s", k, schema.Removed)) - } - - return ws, es -} - -// Zero returns the zero value for a type.
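Since validatePrimitive above hands ValidateFunc the decoded value and the key, a custom validator is just a function with that shape. A minimal sketch; the port rule and name are made-up examples:

package example

import "fmt"

// validatePort matches the SchemaValidateFunc signature: it receives the
// decoded value plus the key, and returns warnings and errors.
func validatePort(v interface{}, k string) ([]string, []error) {
	port, ok := v.(int)
	if !ok {
		return nil, []error{fmt.Errorf("%s: expected an int", k)}
	}
	if port < 1 || port > 65535 {
		return nil, []error{fmt.Errorf("%s: %d is not a valid TCP port", k, port)}
	}
	return nil, nil
}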
-func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return new(Set) - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go deleted file mode 100644 index fe6d750..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go +++ /dev/null @@ -1,125 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "sort" - "strconv" -) - -func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { - if val == nil { - buf.WriteRune(';') - return - } - - switch schema.Type { - case TypeBool: - if val.(bool) { - buf.WriteRune('1') - } else { - buf.WriteRune('0') - } - case TypeInt: - buf.WriteString(strconv.Itoa(val.(int))) - case TypeFloat: - buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) - case TypeString: - buf.WriteString(val.(string)) - case TypeList: - buf.WriteRune('(') - l := val.([]interface{}) - for _, innerVal := range l { - serializeCollectionMemberForHash(buf, innerVal, schema.Elem) - } - buf.WriteRune(')') - case TypeMap: - - m := val.(map[string]interface{}) - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - buf.WriteRune('[') - for _, k := range keys { - innerVal := m[k] - if innerVal == nil { - continue - } - buf.WriteString(k) - buf.WriteRune(':') - - switch innerVal := innerVal.(type) { - case int: - buf.WriteString(strconv.Itoa(innerVal)) - case float64: - buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) - case string: - buf.WriteString(innerVal) - default: - panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) - } - - buf.WriteRune(';') - } - buf.WriteRune(']') - case TypeSet: - buf.WriteRune('{') - s := val.(*Set) - for _, innerVal := range s.List() { - serializeCollectionMemberForHash(buf, innerVal, schema.Elem) - } - buf.WriteRune('}') - default: - panic("unknown schema type to serialize") - } - buf.WriteRune(';') -} - -// SerializeResourceForHash appends a serialization of the given resource config -// to the given buffer, guaranteeing deterministic results given the same value -// and schema. -// -// Its primary purpose is as input into a hashing function in order -// to hash complex substructures when used in sets, and so the serialization -// is not reversible. -func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { - if val == nil { - return - } - sm := resource.Schema - m := val.(map[string]interface{}) - var keys []string - for k := range sm { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - innerSchema := sm[k] - // Skip attributes that are not user-provided. Computed attributes - // do not contribute to the hash since their ultimate value cannot - // be known at plan/diff time.
- if !(innerSchema.Required || innerSchema.Optional) { - continue - } - - buf.WriteString(k) - buf.WriteRune(':') - innerVal := m[k] - SerializeValueForHash(buf, innerVal, innerSchema) - } -} - -func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { - switch tElem := elem.(type) { - case *Schema: - SerializeValueForHash(buf, val, tElem) - case *Resource: - buf.WriteRune('<') - SerializeResourceForHash(buf, val, tElem) - buf.WriteString(">;") - default: - panic(fmt.Sprintf("invalid element type: %T", tElem)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go deleted file mode 100644 index bb194ee..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go +++ /dev/null @@ -1,234 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "sync" - - "github.com/hashicorp/terraform/helper/hashcode" -) - -// HashString hashes strings. If you want a Set of strings, this is the -// SchemaSetFunc you want. -func HashString(v interface{}) int { - return hashcode.String(v.(string)) -} - -// HashResource hashes complex structures that are described using -// a *Resource. This is the default set implementation used when a set's -// element type is a full resource. -func HashResource(resource *Resource) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeResourceForHash(&buf, v, resource) - return hashcode.String(buf.String()) - } -} - -// HashSchema hashes values that are described using a *Schema. This is the -// default set implementation used when a set's element type is a single -// schema. -func HashSchema(schema *Schema) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeValueForHash(&buf, v, schema) - return hashcode.String(buf.String()) - } -} - -// Set is a set data structure that is returned for elements of type -// TypeSet. -type Set struct { - F SchemaSetFunc - - m map[string]interface{} - once sync.Once -} - -// NewSet is a convenience method for creating a new set with the given -// items. -func NewSet(f SchemaSetFunc, items []interface{}) *Set { - s := &Set{F: f} - for _, i := range items { - s.Add(i) - } - - return s -} - -// CopySet returns a copy of another set. -func CopySet(otherSet *Set) *Set { - return NewSet(otherSet.F, otherSet.List()) -} - -// Add adds an item to the set if it isn't already in the set. -func (s *Set) Add(item interface{}) { - s.add(item, false) -} - -// Remove removes an item if it's already in the set. Idempotent. -func (s *Set) Remove(item interface{}) { - s.remove(item) -} - -// Contains checks if the set has the given item. -func (s *Set) Contains(item interface{}) bool { - _, ok := s.m[s.hash(item)] - return ok -} - -// Len returns the amount of items in the set. -func (s *Set) Len() int { - return len(s.m) -} - -// List returns the elements of this set in slice format. -// -// The order of the returned elements is deterministic. Given the same -// set, the order of this will always be the same. -func (s *Set) List() []interface{} { - result := make([]interface{}, len(s.m)) - for i, k := range s.listCode() { - result[i] = s.m[k] - } - - return result -} - -// Difference performs a set difference of the two sets, returning -// a new third set that has only the elements unique to this set. 
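For orientation, basic Set construction with the HashString helper above looks roughly like this; the element values are illustrative:

package example

import "github.com/hashicorp/terraform/helper/schema"

// exampleSet builds a string set; duplicates collapse to one element,
// and List returns the elements in deterministic (hash-code) order.
func exampleSet() []interface{} {
	s := schema.NewSet(schema.HashString, []interface{}{"a", "b"})
	s.Add("c")
	s.Add("a") // already present: the set is unchanged
	return s.List()
}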
-func (s *Set) Difference(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; !ok { - result.m[k] = v - } - } - - return result -} - -// Intersection performs the set intersection of the two sets -// and returns a new third set. -func (s *Set) Intersection(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; ok { - result.m[k] = v - } - } - - return result -} - -// Union performs the set union of the two sets and returns a new third -// set. -func (s *Set) Union(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - result.m[k] = v - } - for k, v := range other.m { - result.m[k] = v - } - - return result -} - -func (s *Set) Equal(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - return reflect.DeepEqual(s.m, other.m) -} - -// HashEqual simply compares the keys of the top-level map to the keys in the -// other set's top-level map to see if they are equal. This obviously assumes -// you have a properly working hash function - use HashResource if in doubt. -func (s *Set) HashEqual(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - ks1 := make([]string, 0) - ks2 := make([]string, 0) - - for k := range s.m { - ks1 = append(ks1, k) - } - for k := range other.m { - ks2 = append(ks2, k) - } - - sort.Strings(ks1) - sort.Strings(ks2) - - return reflect.DeepEqual(ks1, ks2) -} - -func (s *Set) GoString() string { - return fmt.Sprintf("*Set(%#v)", s.m) -} - -func (s *Set) init() { - s.m = make(map[string]interface{}) -} - -func (s *Set) add(item interface{}, computed bool) string { - s.once.Do(s.init) - - code := s.hash(item) - if computed { - code = "~" + code - } - - if _, ok := s.m[code]; !ok { - s.m[code] = item - } - - return code -} - -func (s *Set) hash(item interface{}) string { - code := s.F(item) - // Always return a nonnegative hashcode. - if code < 0 { - code = -code - } - return strconv.Itoa(code) -} - -func (s *Set) remove(item interface{}) string { - s.once.Do(s.init) - - code := s.hash(item) - delete(s.m, code) - - return code -} - -func (s *Set) index(item interface{}) int { - return sort.SearchStrings(s.listCode(), s.hash(item)) -} - -func (s *Set) listCode() []string { - // Sort the hash codes so the order of the list is deterministic - keys := make([]string, 0, len(s.m)) - for k := range s.m { - keys = append(keys, k) - } - sort.Sort(sort.StringSlice(keys)) - return keys -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go deleted file mode 100644 index 9765bdb..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go +++ /dev/null @@ -1,30 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -// TestResourceDataRaw creates a ResourceData from a raw configuration map.
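A quick sketch of the set algebra defined above, as a diff computation might use it; the values are illustrative:

package example

import "github.com/hashicorp/terraform/helper/schema"

// exampleSetOps derives removed, retained, and combined elements.
func exampleSetOps() (removed, kept, all *schema.Set) {
	oldSet := schema.NewSet(schema.HashString, []interface{}{"a", "b"})
	newSet := schema.NewSet(schema.HashString, []interface{}{"b", "c"})

	removed = oldSet.Difference(newSet) // {"a"}: unique to oldSet
	kept = oldSet.Intersection(newSet)  // {"b"}: present in both
	all = oldSet.Union(newSet)          // {"a", "b", "c"}
	return removed, kept, all
}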
-func TestResourceDataRaw( - t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { - c, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - sm := schemaMap(schema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := sm.Data(nil, diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go deleted file mode 100644 index 9286987..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go +++ /dev/null @@ -1,21 +0,0 @@ -package schema - -//go:generate stringer -type=ValueType valuetype.go - -// ValueType is an enum of the type that can be represented by a schema. -type ValueType int - -const ( - TypeInvalid ValueType = iota - TypeBool - TypeInt - TypeFloat - TypeString - TypeList - TypeMap - TypeSet - typeObject -) - -// NOTE: ValueType has more functions defined on it in schema.go. We can't -// put them here because we reference other files. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go deleted file mode 100644 index 1610cec..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT. - -package schema - -import "fmt" - -const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" - -var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} - -func (i ValueType) String() string { - if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) - } - return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go deleted file mode 100644 index 7edd5e7..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go +++ /dev/null @@ -1,80 +0,0 @@ -package shadow - -import ( - "fmt" - "io" - "reflect" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/reflectwalk" -) - -// Close will close all shadow values within the given structure. -// -// This uses reflection to walk the structure, find all shadow elements, -// and close them. Currently this will only find struct fields that are -// shadow values, and not slice elements, etc. -func Close(v interface{}) error { - // We require a pointer so we can address the internal fields - val := reflect.ValueOf(v) - if val.Kind() != reflect.Ptr { - return fmt.Errorf("value must be a pointer") - } - - // Walk and close - var w closeWalker - if err := reflectwalk.Walk(v, &w); err != nil { - return err - } - - return w.Err -} - -type closeWalker struct { - Err error -} - -func (w *closeWalker) Struct(reflect.Value) error { - // Do nothing. 
We implement this for reflectwalk.StructWalker - return nil -} - -func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error { - // Not sure why this would be but lets avoid some panics - if !v.IsValid() { - return nil - } - - // Empty for exported, so don't check unexported fields - if f.PkgPath != "" { - return nil - } - - // Verify the io.Closer is in this package - typ := v.Type() - if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" { - return nil - } - - // We're looking for an io.Closer - raw := v.Interface() - if raw == nil { - return nil - } - - closer, ok := raw.(io.Closer) - if !ok && v.CanAddr() { - closer, ok = v.Addr().Interface().(io.Closer) - } - if !ok { - return reflectwalk.SkipEntry - } - - // Close it - if err := closer.Close(); err != nil { - w.Err = multierror.Append(w.Err, err) - } - - // Don't go into the struct field - return reflectwalk.SkipEntry -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go deleted file mode 100644 index 4223e92..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go +++ /dev/null @@ -1,128 +0,0 @@ -package shadow - -import ( - "sync" -) - -// ComparedValue is a struct that finds a value by comparing some key -// to the list of stored values. This is useful when there is no easy -// uniquely identifying key that works in a map (for that, use KeyedValue). -// -// ComparedValue is very expensive, relative to other Value types. Try to -// limit the number of values stored in a ComparedValue by potentially -// nesting it within a KeyedValue (a keyed value points to a compared value, -// for example). -type ComparedValue struct { - // Func is a function that is given the lookup key and a single - // stored value. If it matches, it returns true. - Func func(k, v interface{}) bool - - lock sync.Mutex - once sync.Once - closed bool - values []interface{} - waiters map[interface{}]*Value -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *ComparedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. -func (w *ComparedValue) Value(k interface{}) interface{} { - v, val := w.valueWaiter(k) - if val == nil { - return v - } - - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists. -func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) { - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *ComparedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.once.Do(w.init) - - // Check if we already have this exact value (by simply comparing - // with == directly). If we do, then we don't insert it again. 
- found := false - for _, v2 := range w.values { - if v == v2 { - found = true - break - } - } - - if !found { - // Set the value, always - w.values = append(w.values, v) - } - - // Go through the waiters - for k, val := range w.waiters { - if w.Func(k, v) { - val.SetValue(v) - delete(w.waiters, k) - } - } -} - -func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) { - w.lock.Lock() - w.once.Do(w.init) - - // Look for a pre-existing value - for _, v := range w.values { - if w.Func(k, v) { - w.lock.Unlock() - return v, nil - } - } - - // If we're closed, return that - if w.closed { - w.lock.Unlock() - return ErrClosed, nil - } - - // Pre-existing value doesn't exist, create a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // Return the waiter - return nil, val -} - -// Must be called with w.lock held. -func (w *ComparedValue) init() { - w.waiters = make(map[interface{}]*Value) - if w.Func == nil { - w.Func = func(k, v interface{}) bool { return k == v } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go deleted file mode 100644 index 432b036..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go +++ /dev/null @@ -1,151 +0,0 @@ -package shadow - -import ( - "sync" -) - -// KeyedValue is a struct that coordinates a value by key. If a value is -// not available for a given key, it'll block until it is available. -type KeyedValue struct { - lock sync.Mutex - once sync.Once - values map[string]interface{} - waiters map[string]*Value - closed bool -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *KeyedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. -func (w *KeyedValue) Value(k string) interface{} { - w.lock.Lock() - v, val := w.valueWaiter(k) - w.lock.Unlock() - - // If we have no waiter, then return the value - if val == nil { - return v - } - - // We have a waiter, so wait - return val.Value() -} - -// WaitForChange waits for the value with the given key to be set again. - // If the key isn't set, it'll wait for an initial value. Note that while - // it is called "WaitForChange", the value isn't guaranteed to _change_; - // this will return when a SetValue is called for the given k. -func (w *KeyedValue) WaitForChange(k string) interface{} { - w.lock.Lock() - w.once.Do(w.init) - - // If we're closed, we're closed - if w.closed { - w.lock.Unlock() - return ErrClosed - } - - // Check for an active waiter. If there isn't one, make it - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // And wait - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists.
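A minimal sketch of ComparedValue with a custom matcher, under the assumption that stored values carry their own lookup key; the pair type and values are hypothetical:

package example

import "github.com/hashicorp/terraform/helper/shadow"

type pair struct{ Key, Val string }

// exampleCompared looks a value up by its Key field rather than by
// plain equality; Value blocks until a matching value has been stored.
func exampleCompared() string {
	cv := &shadow.ComparedValue{
		Func: func(k, v interface{}) bool { return v.(pair).Key == k.(string) },
	}
	cv.SetValue(pair{Key: "db", Val: "postgres"})
	return cv.Value("db").(pair).Val // "postgres"
}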
-func (w *KeyedValue) ValueOk(k string) (interface{}, bool) { - w.lock.Lock() - defer w.lock.Unlock() - - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *KeyedValue) SetValue(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.setValue(k, v) -} - -// Init will initialize the key to a given value only if the key has -// not been set before. This is safe to call multiple times and in parallel. -func (w *KeyedValue) Init(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, set the value. - _, val := w.valueWaiter(k) - if val != nil { - w.setValue(k, v) - } -} - -// Must be called with w.lock held. -func (w *KeyedValue) init() { - w.values = make(map[string]interface{}) - w.waiters = make(map[string]*Value) -} - -// setValue is like SetValue but assumes the lock is held. -func (w *KeyedValue) setValue(k string, v interface{}) { - w.once.Do(w.init) - - // Set the value, always - w.values[k] = v - - // If we have a waiter, set it - if val, ok := w.waiters[k]; ok { - val.SetValue(v) - delete(w.waiters, k) - } -} - -// valueWaiter gets the value or the Value waiter for a given key. -// -// This must be called with lock held. -func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) { - w.once.Do(w.init) - - // If we have this value already, return it - if v, ok := w.values[k]; ok { - return v, nil - } - - // If we're closed, return that - if w.closed { - return ErrClosed, nil - } - - // No pending value, check for a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - - // Return the waiter - return nil, val -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go deleted file mode 100644 index 0a43d4d..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go +++ /dev/null @@ -1,66 +0,0 @@ -package shadow - -import ( - "container/list" - "sync" -) - -// OrderedValue is a struct that keeps track of a value in the order -// it is set. Each time Value() is called, it will return the most recent -// call's value, then discard it. -// -// This is unlike Value that returns the same value once it is set. -type OrderedValue struct { - lock sync.Mutex - values *list.List - waiters *list.List -} - -// Value returns the last value that was set, or blocks until one -// is received. -func (w *OrderedValue) Value() interface{} { - w.lock.Lock() - - // If we have a pending value already, use it - if w.values != nil && w.values.Len() > 0 { - front := w.values.Front() - w.values.Remove(front) - w.lock.Unlock() - return front.Value - } - - // No pending value, create a waiter - if w.waiters == nil { - w.waiters = list.New() - } - - var val Value - w.waiters.PushBack(&val) - w.lock.Unlock() - - // Return the value once we have it - return val.Value() -} - -// SetValue sets the latest value.
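A sketch of KeyedValue coordinating two goroutines; the key and value are illustrative:

package example

import "github.com/hashicorp/terraform/helper/shadow"

// exampleKeyed blocks in Value until the writer goroutine calls
// SetValue for the same key. The zero value of KeyedValue is ready to use.
func exampleKeyed() string {
	var kv shadow.KeyedValue
	go kv.SetValue("status", "ready")
	return kv.Value("status").(string)
}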
-func (w *OrderedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, notify it - if w.waiters != nil && w.waiters.Len() > 0 { - front := w.waiters.Front() - w.waiters.Remove(front) - - val := front.Value.(*Value) - val.SetValue(v) - return - } - - // Add it to the list of values - if w.values == nil { - w.values = list.New() - } - - w.values.PushBack(v) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go deleted file mode 100644 index 2413335..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go +++ /dev/null @@ -1,79 +0,0 @@ -package shadow - -import ( - "errors" - "sync" -) - -// ErrClosed is returned by any closed values. -// -// A "closed value" is when the shadow has been notified that the real -// side is complete and any blocking values will _never_ be satisfied -// in the future. In this case, this error is returned. If a value is already -// available, that is still returned. -var ErrClosed = errors.New("shadow closed") - -// Value is a struct that coordinates a value between two -// parallel routines. It is similar to atomic.Value except that when -// Value is called if it isn't set it will wait for it. -// -// The Value can be closed with Close, which will cause any future -// blocking operations to return immediately with ErrClosed. -type Value struct { - lock sync.Mutex - cond *sync.Cond - value interface{} - valueSet bool -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the struct docs. -func (w *Value) Close() error { - w.lock.Lock() - set := w.valueSet - w.lock.Unlock() - - // If we haven't set the value, set it - if !set { - w.SetValue(ErrClosed) - } - - // Done - return nil -} - -// Value returns the value that was set. -func (w *Value) Value() interface{} { - w.lock.Lock() - defer w.lock.Unlock() - - // If we already have a value just return - for !w.valueSet { - // No value, setup the condition variable if we have to - if w.cond == nil { - w.cond = sync.NewCond(&w.lock) - } - - // Wait on it - w.cond.Wait() - } - - // Return the value - return w.value -} - -// SetValue sets the value. -func (w *Value) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // Set the value - w.valueSet = true - w.value = v - - // If we have a condition, clear it - if w.cond != nil { - w.cond.Broadcast() - w.cond = nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go deleted file mode 100644 index 87c8431..0000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go +++ /dev/null @@ -1,43 +0,0 @@ -package moduledeps - -import ( - "github.com/hashicorp/terraform/plugin/discovery" -) - -// Providers describes a set of provider dependencies for a given module. -// -// Each named provider instance can have one version constraint. -type Providers map[ProviderInstance]ProviderDependency - -// ProviderDependency describes the dependency for a particular provider -// instance, including both the set of allowed versions and the reason for -// the dependency. -type ProviderDependency struct { - Constraints discovery.Constraints - Reason ProviderDependencyReason -} - -// ProviderDependencyReason is an enumeration of reasons why a dependency might be -// present. 
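The Close semantics described above are easiest to see with a blocked reader; a minimal sketch:

package example

import "github.com/hashicorp/terraform/helper/shadow"

// exampleValue shows a reader released by Close: since no value was
// ever set, the blocked Value call returns ErrClosed.
func exampleValue() bool {
	var v shadow.Value
	go v.Close()
	return v.Value() == shadow.ErrClosed
}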
-type ProviderDependencyReason int - -const ( - // ProviderDependencyExplicit means that there is an explicit "provider" - // block in the configuration for this module. - ProviderDependencyExplicit ProviderDependencyReason = iota - - // ProviderDependencyImplicit means that there is no explicit "provider" - // block but there is at least one resource that uses this provider. - ProviderDependencyImplicit - - // ProviderDependencyInherited is a special case of - // ProviderDependencyImplicit where a parent module has defined a - // configuration for the provider that has been inherited by at least one - // resource in this module. - ProviderDependencyInherited - - // ProviderDependencyFromState means that this provider is not currently - // referenced by configuration at all, but some existing instances in - // the state still depend on it. - ProviderDependencyFromState -) diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go deleted file mode 100644 index 7eff083..0000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package moduledeps contains types that can be used to describe the -// providers required for all of the modules in a module tree. -// -// It does not itself contain the functionality for populating such -// data structures; that's in Terraform core, since this package intentionally -// does not depend on terraform core to avoid package dependency cycles. -package moduledeps diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/module.go b/vendor/github.com/hashicorp/terraform/moduledeps/module.go deleted file mode 100644 index d6cbaf5..0000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/module.go +++ /dev/null @@ -1,204 +0,0 @@ -package moduledeps - -import ( - "sort" - "strings" - - "github.com/hashicorp/terraform/plugin/discovery" -) - -// Module represents the dependencies of a single module, as well being -// a node in a tree of such structures representing the dependencies of -// an entire configuration. -type Module struct { - Name string - Providers Providers - Children []*Module -} - -// WalkFunc is a callback type for use with Module.WalkTree -type WalkFunc func(path []string, parent *Module, current *Module) error - -// WalkTree calls the given callback once for the receiver and then -// once for each descendent, in an order such that parents are called -// before their children and siblings are called in the order they -// appear in the Children slice. -// -// When calling the callback, parent will be nil for the first call -// for the receiving module, and then set to the direct parent of -// each module for the subsequent calls. -// -// The path given to the callback is valid only until the callback -// returns, after which it will be mutated and reused. Callbacks must -// therefore copy the path slice if they wish to retain it. -// -// If the given callback returns an error, the walk will be aborted at -// that point and that error returned to the caller. -// -// This function is not thread-safe for concurrent modifications of the -// data structure, so it's the caller's responsibility to arrange for that -// should it be needed. -// -// It is safe for a callback to modify the descendents of the "current" -// module, including the ordering of the Children slice itself, but the -// callback MUST NOT modify the parent module. 
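WalkTree is easiest to read with a concrete callback. A sketch that prints every module path; note the joined string is built before the reused path slice is mutated, so nothing needs to be copied:

package example

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/moduledeps"
)

// printTree visits the root module and all descendents, parents before
// children, printing each module's dotted path.
func printTree(root *moduledeps.Module) error {
	return root.WalkTree(func(path []string, parent, current *moduledeps.Module) error {
		fmt.Println(strings.Join(path, "."))
		return nil
	})
}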
-func (m *Module) WalkTree(cb WalkFunc) error { - return walkModuleTree(make([]string, 0, 1), nil, m, cb) -} - -func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { - path = append(path, current.Name) - err := cb(path, parent, current) - if err != nil { - return err - } - - for _, child := range current.Children { - err := walkModuleTree(path, current, child, cb) - if err != nil { - return err - } - } - return nil -} - -// SortChildren sorts the Children slice into lexicographic order by -// name, in-place. -// -// This is primarily useful prior to calling WalkTree so that the walk -// will proceed in a consistent order. -func (m *Module) SortChildren() { - sort.Sort(sortModules{m.Children}) -} - -// SortDescendents is a convenience wrapper for calling SortChildren on -// the receiver and all of its descendent modules. -func (m *Module) SortDescendents() { - m.WalkTree(func(path []string, parent *Module, current *Module) error { - current.SortChildren() - return nil - }) -} - -type sortModules struct { - modules []*Module -} - -func (s sortModules) Len() int { - return len(s.modules) -} - -func (s sortModules) Less(i, j int) bool { - cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) - return cmp < 0 -} - -func (s sortModules) Swap(i, j int) { - s.modules[i], s.modules[j] = s.modules[j], s.modules[i] -} - -// PluginRequirements produces a PluginRequirements structure that can -// be used with discovery.PluginMetaSet.ConstrainVersions to identify -// suitable plugins to satisfy the module's provider dependencies. -// -// This method only considers the direct requirements of the receiver. -// Use AllPluginRequirements to flatten the dependencies for the -// entire tree of modules. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) PluginRequirements() discovery.PluginRequirements { - ret := make(discovery.PluginRequirements) - for inst, dep := range m.Providers { - // m.Providers is keyed on provider names, such as "aws.foo". - // a PluginRequirements wants keys to be provider *types*, such - // as "aws". If there are multiple aliases for the same - // provider then we will flatten them into a single requirement - // by combining their constraint sets. - pty := inst.Type() - if existing, exists := ret[pty]; exists { - ret[pty].Versions = existing.Versions.Append(dep.Constraints) - } else { - ret[pty] = &discovery.PluginConstraints{ - Versions: dep.Constraints, - } - } - } - return ret -} - -// AllPluginRequirements calls PluginRequirements for the receiver and all -// of its descendents, and merges the result into a single PluginRequirements -// structure that would satisfy all of the modules together. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) AllPluginRequirements() discovery.PluginRequirements { - var ret discovery.PluginRequirements - m.WalkTree(func(path []string, parent *Module, current *Module) error { - ret = ret.Merge(current.PluginRequirements()) - return nil - }) - return ret -} - -// Equal returns true if the receiver is the root of an identical tree -// to the other given Module. This is a deep comparison that considers -// the equality of all downstream modules too. -// -// The children are considered to be ordered, so callers may wish to use -// SortDescendents first to normalize the order of the slices of child nodes. 
-// -// The implementation of this function is not optimized since it is provided -// primarily for use in tests. -func (m *Module) Equal(other *Module) bool { - // take care of nils first - if m == nil && other == nil { - return true - } else if (m == nil && other != nil) || (m != nil && other == nil) { - return false - } - - if m.Name != other.Name { - return false - } - - if len(m.Providers) != len(other.Providers) { - return false - } - if len(m.Children) != len(other.Children) { - return false - } - - // Can't use reflect.DeepEqual on this provider structure because - // the nested Constraints objects contain function pointers that - // never compare as equal. So we'll need to walk it the long way. - for inst, dep := range m.Providers { - if _, exists := other.Providers[inst]; !exists { - return false - } - - if dep.Reason != other.Providers[inst].Reason { - return false - } - - // Constraints are not too easy to compare robustly, so - // we'll just use their string representations as a proxy - // for now. - if dep.Constraints.String() != other.Providers[inst].Constraints.String() { - return false - } - } - - // Above we already checked that we have the same number of children - // in each module, so now we just need to check that they are - // recursively equal. - for i := range m.Children { - if !m.Children[i].Equal(other.Children[i]) { - return false - } - } - - // If we fall out here then they are equal - return true -} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go deleted file mode 100644 index 89ceefb..0000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go +++ /dev/null @@ -1,30 +0,0 @@ -package moduledeps - -import ( - "strings" -) - -// ProviderInstance describes a particular provider instance by its full name, -// like "null" or "aws.foo". -type ProviderInstance string - -// Type returns the provider type of this instance. For example, for an instance -// named "aws.foo" the type is "aws". -func (p ProviderInstance) Type() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - t = t[:dotPos] - } - return t -} - -// Alias returns the alias of this provider, if any. An instance named "aws.foo" -// has the alias "foo", while an instance named just "docker" has no alias, -// so the empty string would be returned. -func (p ProviderInstance) Alias() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - return t[dotPos+1:] - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go deleted file mode 100644 index 7e2f4fe..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/client.go +++ /dev/null @@ -1,33 +0,0 @@ -package plugin - -import ( - "os" - "os/exec" - - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/plugin/discovery" -) - -// ClientConfig returns a configuration object that can be used to instantiate -// a client for the plugin described by the given metadata. 
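Type and Alias simply split the instance name on its first dot, so:

    fmt.Println(ProviderInstance("aws.foo").Type())  // "aws"
    fmt.Println(ProviderInstance("aws.foo").Alias()) // "foo"
    fmt.Println(ProviderInstance("null").Type())     // "null"
    fmt.Println(ProviderInstance("null").Alias())    // "" (no alias)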
-func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { - logger := hclog.New(&hclog.LoggerOptions{ - Name: "plugin", - Level: hclog.Trace, - Output: os.Stderr, - }) - - return &plugin.ClientConfig{ - Cmd: exec.Command(m.Path), - HandshakeConfig: Handshake, - Managed: true, - Plugins: PluginMap, - Logger: logger, - } -} - -// Client returns a plugin client for the plugin described by the given metadata. -func Client(m discovery.PluginMeta) *plugin.Client { - return plugin.NewClient(ClientConfig(m)) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go deleted file mode 100644 index df855a7..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go +++ /dev/null @@ -1,30 +0,0 @@ -package discovery - -// Error is a type used to describe situations that the caller must handle -// since they indicate some form of user error. -// -// The functions and methods that return these specialized errors indicate so -// in their documentation. The Error type should not itself be used directly, -// but rather errors should be compared using the == operator with the -// error constants in this package. -// -// Values of this type are _not_ used when the error being reported is an -// operational error (server unavailable, etc) or indicative of a bug in -// this package or its caller. -type Error string - -// ErrorNoSuitableVersion indicates that a suitable version (meeting given -// constraints) is not available. -const ErrorNoSuitableVersion = Error("no suitable version is available") - -// ErrorNoVersionCompatible indicates that all of the available versions -// that otherwise met constraints are not compatible with the current -// version of Terraform. -const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform") - -// ErrorNoSuchProvider indicates that no provider exists with a name given -const ErrorNoSuchProvider = Error("no provider exists with the given name") - -func (err Error) Error() string { - return string(err) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go deleted file mode 100644 index 10f8fce..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go +++ /dev/null @@ -1,167 +0,0 @@ -package discovery - -import ( - "io/ioutil" - "log" - "path/filepath" - "strings" -) - -// FindPlugins looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider") and -// returns a PluginMetaSet representing the discovered potential-plugins. -// -// Currently this supports two different naming schemes. The current -// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing -// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is -// files directly in the given directory whose names are like -// terraform-$KIND-$NAME. -// -// Only one plugin will be returned for each unique plugin (name, version) -// pair, with preference given to files found in earlier directories. -// -// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. -func FindPlugins(kind string, dirs []string) PluginMetaSet { - return ResolvePluginPaths(FindPluginPaths(kind, dirs)) -} - -// FindPluginPaths looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider"). 
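A typical consumer pairs Client with go-plugin's dispense flow; the "provider" key matches PluginMap, which appears later in this diff. A rough sketch, assuming meta was produced by plugin discovery:

    c := Client(meta) // meta: a discovery.PluginMeta
    defer c.Kill()

    rpcClient, err := c.Client()
    if err != nil {
        log.Fatal(err)
    }
    raw, err := rpcClient.Dispense("provider")
    if err != nil {
        log.Fatal(err)
    }
    provider := raw.(terraform.ResourceProvider)
    for _, rt := range provider.Resources() {
        fmt.Println(rt.Name)
    }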
-// -// The return value is a list of absolute paths that appear to refer to -// plugins in the given directories, based only on what can be inferred -// from the naming scheme. The paths returned are ordered such that files -// in later dirs appear after files in earlier dirs in the given directory -// list. Within the same directory plugins are returned in a consistent but -// undefined order. -func FindPluginPaths(kind string, dirs []string) []string { - // This is just a thin wrapper around findPluginPaths so that we can - // use the latter in tests with a fake machineName so we can use our - // test fixtures. - return findPluginPaths(kind, dirs) -} - -func findPluginPaths(kind string, dirs []string) []string { - prefix := "terraform-" + kind + "-" - - ret := make([]string, 0, len(dirs)) - - for _, dir := range dirs { - items, err := ioutil.ReadDir(dir) - if err != nil { - // Ignore missing dirs, non-dirs, etc - continue - } - - log.Printf("[DEBUG] checking for %s in %q", kind, dir) - - for _, item := range items { - fullName := item.Name() - - if !strings.HasPrefix(fullName, prefix) { - continue - } - - // New-style paths must have a version segment in filename - if strings.Contains(strings.ToLower(fullName), "_v") { - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - log.Printf("[DEBUG] found %s %q", kind, fullName) - ret = append(ret, filepath.Clean(absPath)) - continue - } - - // Legacy style with files directly in the base directory - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - log.Printf("[WARNING] found legacy %s %q", kind, fullName) - - ret = append(ret, filepath.Clean(absPath)) - } - } - - return ret -} - -// ResolvePluginPaths takes a list of paths to plugin executables (as returned -// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the -// referenced plugins. -// -// If the same combination of plugin name and version appears multiple times, -// the earlier reference will be preferred. Several different versions of -// the same plugin name may be returned, in which case the methods of -// PluginMetaSet can be used to filter down. -func ResolvePluginPaths(paths []string) PluginMetaSet { - s := make(PluginMetaSet) - - type nameVersion struct { - Name string - Version string - } - found := make(map[nameVersion]struct{}) - - for _, path := range paths { - baseName := strings.ToLower(filepath.Base(path)) - if !strings.HasPrefix(baseName, "terraform-") { - // Should never happen with reasonable input - continue - } - - baseName = baseName[10:] - firstDash := strings.Index(baseName, "-") - if firstDash == -1 { - // Should never happen with reasonable input - continue - } - - baseName = baseName[firstDash+1:] - if baseName == "" { - // Should never happen with reasonable input - continue - } - - // Trim the .exe suffix used on Windows before we start wrangling - // the remainder of the path. - if strings.HasSuffix(baseName, ".exe") { - baseName = baseName[:len(baseName)-4] - } - - parts := strings.SplitN(baseName, "_v", 2) - name := parts[0] - version := VersionZero - if len(parts) == 2 { - version = parts[1] - } - - // Auto-installed plugins contain an extra name portion representing - // the expected plugin version, which we must trim off. 
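Taken together with the version-suffix trimming that follows, the parsing above means ResolvePluginPaths behaves like this sketch (paths are illustrative):

    metas := ResolvePluginPaths([]string{
        "/plugins/terraform-provider-aws_v1.2.0",
        "/plugins/terraform-provider-azure",            // legacy name, no version
        "/plugins/other/terraform-provider-aws_v1.2.0", // duplicate: the first one wins
    })
    // Yields two entries: aws at 1.2.0 and azure at the fallback version 0.0.0.
    fmt.Println(metas.Count()) // 2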
- if underX := strings.Index(version, "_x"); underX != -1 { - version = version[:underX] - } - - if _, ok := found[nameVersion{name, version}]; ok { - // Skip duplicate versions of the same plugin - // (We do this during this step because after this we will be - // dealing with sets and thus lose our ordering with which to - // decide preference.) - continue - } - - s.Add(PluginMeta{ - Name: name, - Version: VersionStr(version), - Path: path, - }) - found[nameVersion{name, version}] = struct{}{} - } - - return s -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go deleted file mode 100644 index 64d2b69..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go +++ /dev/null @@ -1,432 +0,0 @@ -package discovery - -import ( - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "runtime" - "strconv" - "strings" - - "golang.org/x/net/html" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" - multierror "github.com/hashicorp/go-multierror" - "github.com/mitchellh/cli" -) - -// Releases are located by parsing the html listing from releases.hashicorp.com. -// -// The URL for releases follows the pattern: -// https://releases.hashicorp.com/terraform-provider-name/<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext> -// -// The plugin protocol version will be saved with the release and returned in -// the header X-TERRAFORM_PROTOCOL_VERSION. - -const protocolVersionHeader = "x-terraform-protocol-version" - -var releaseHost = "https://releases.hashicorp.com" - -var httpClient = cleanhttp.DefaultClient() - -// An Installer maintains a local cache of plugins by downloading plugins -// from an online repository. -type Installer interface { - Get(name string, req Constraints) (PluginMeta, error) - PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error) -} - -// ProviderInstaller is an Installer implementation that knows how to -// download Terraform providers from the official HashiCorp releases service -// into a local directory. The files downloaded are compliant with the -// naming scheme expected by FindPlugins, so the target directory of a -// provider installer can be used as one of several plugin discovery sources. -type ProviderInstaller struct { - Dir string - - PluginProtocolVersion uint - - // OS and Arch specify the OS and architecture that should be used when - // installing plugins. These use the same labels as the runtime.GOOS and - // runtime.GOARCH variables respectively, and indeed the values of these - // are used as defaults if either of these is the empty string. - OS string - Arch string - - // Skip checksum and signature verification - SkipVerify bool - - Ui cli.Ui // Ui for output -} - -// Get is part of an implementation of type Installer, and attempts to download -// and install a Terraform provider matching the given constraints. -// -// This method may return one of a number of sentinel errors from this -// package to indicate issues that are likely to be resolvable via user action: -// -// ErrorNoSuchProvider: no provider with the given name exists in the repository. -// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. -// ErrorNoVersionCompatible: a plugin was found within the constraints but it is -// incompatible with the current Terraform version.
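Callers are expected to compare Get's error against those sentinel values with ==, as documented in error.go above. For example (installer is a hypothetical *ProviderInstaller; AllVersions is defined later in this diff):

    meta, err := installer.Get("aws", AllVersions)
    switch err {
    case nil:
        fmt.Printf("installed %s %s\n", meta.Name, meta.Version)
    case ErrorNoSuchProvider:
        fmt.Println("no provider with that name exists in the repository")
    case ErrorNoSuitableVersion:
        fmt.Println("provider exists, but no version matches the constraints")
    case ErrorNoVersionCompatible:
        fmt.Println("matching versions exist, but none works with this Terraform")
    default:
        log.Fatal(err) // operational problem, not a user-actionable error
    }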
-// -// These errors should be recognized and handled as special cases by the caller -// to present a suitable user-oriented error message. -// -// All other errors indicate an internal problem that is likely _not_ solvable -// through user action, or at least not within Terraform's scope. Error messages -// are produced under the assumption that if presented to the user they will -// be presented alongside context about what is being installed, and thus the -// error messages do not redundantly include such information. -func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) { - versions, err := i.listProviderVersions(provider) - // TODO: return multiple errors - if err != nil { - return PluginMeta{}, err - } - - if len(versions) == 0 { - return PluginMeta{}, ErrorNoSuitableVersion - } - - versions = allowedVersions(versions, req) - if len(versions) == 0 { - return PluginMeta{}, ErrorNoSuitableVersion - } - - // sort them newest to oldest - Versions(versions).Sort() - - // take the first matching plugin we find - for _, v := range versions { - url := i.providerURL(provider, v.String()) - - if !i.SkipVerify { - sha256, err := i.getProviderChecksum(provider, v.String()) - if err != nil { - return PluginMeta{}, err - } - - // add the checksum parameter for go-getter to verify the download for us. - if sha256 != "" { - url = url + "?checksum=sha256:" + sha256 - } - } - - log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) - if checkPlugin(url, i.PluginProtocolVersion) { - i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) - log.Printf("[DEBUG] getting provider %q version %q at %s", provider, v, url) - err := getter.Get(i.Dir, url) - if err != nil { - return PluginMeta{}, err - } - - // Find what we just installed - // (This is weird, because go-getter doesn't directly return - // information about what was extracted, and we just extracted - // the archive directly into a shared dir here.) - log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v) - metas := FindPlugins("provider", []string{i.Dir}) - log.Printf("[DEBUG] all plugins found %#v", metas) - metas, _ = metas.ValidateVersions() - metas = metas.WithName(provider).WithVersion(v) - log.Printf("[DEBUG] filtered plugins %#v", metas) - if metas.Count() == 0 { - // This should never happen. Suggests that the release archive - // contains an executable file whose name doesn't match the - // expected convention. - return PluginMeta{}, fmt.Errorf( - "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", - v, - ) - } - - if metas.Count() > 1 { - // This should also never happen, and suggests that a - // particular version was re-released with a different - // executable filename. We consider releases as immutable, so - // this is an error. - return PluginMeta{}, fmt.Errorf( - "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", - v, - ) - } - - // By now we know we have exactly one meta, and so "Newest" will - // return that one. 
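The "?checksum=" query string used above is go-getter's standard verification hook: it hashes the downloaded archive before extraction and fails on a mismatch. The same pattern in isolation (URL and digest are illustrative):

    url := "https://releases.hashicorp.com/terraform-provider-null/1.0.0/terraform-provider-null_1.0.0_linux_amd64.zip"
    sha256sum := "0123abcd" // illustrative; real digests are 64 hex characters
    if sha256sum != "" {
        url = url + "?checksum=sha256:" + sha256sum
    }
    // go-getter verifies the digest after download and before extraction.
    if err := getter.Get("/tmp/plugins", url); err != nil {
        log.Fatal(err)
    }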
- return metas.Newest(), nil - } - - log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v) - } - - return PluginMeta{}, ErrorNoVersionCompatible -} - -func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { - purge := make(PluginMetaSet) - - present := FindPlugins("provider", []string{i.Dir}) - for meta := range present { - chosen, ok := used[meta.Name] - if !ok { - purge.Add(meta) - } - if chosen.Path != meta.Path { - purge.Add(meta) - } - } - - removed := make(PluginMetaSet) - var errs error - for meta := range purge { - path := meta.Path - err := os.Remove(path) - if err != nil { - errs = multierror.Append(errs, fmt.Errorf( - "failed to remove unused provider plugin %s: %s", - path, err, - )) - } else { - removed.Add(meta) - } - } - - return removed, errs -} - -// Plugins are referred to by the short name, but all URLs and files will use -// the full name prefixed with terraform-<type>- -func (i *ProviderInstaller) providerName(name string) string { - return "terraform-provider-" + name -} - -func (i *ProviderInstaller) providerFileName(name, version string) string { - os := i.OS - arch := i.Arch - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch) -} - -// providerVersionsURL returns the path to the released versions directory for the provider: -// https://releases.hashicorp.com/terraform-provider-name/ -func (i *ProviderInstaller) providerVersionsURL(name string) string { - return releaseHost + "/" + i.providerName(name) + "/" -} - -// providerURL returns the full path to the provider file, using the current OS -// and ARCH: -// .../terraform-provider-name/<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext> -func (i *ProviderInstaller) providerURL(name, version string) string { - return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version)) -} - -func (i *ProviderInstaller) providerChecksumURL(name, version string) string { - fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version) - u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName) - return u -} - -func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) { - checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version)) - if err != nil { - return "", err - } - - return checksumForFile(checksums, i.providerFileName(name, version)), nil -} - -// checkPlugin makes a HEAD request to the provided url and reports whether the -// plugin's protocol version is compatible. If the header is not present, we -// assume the latest version will be compatible, and leave the check for -// discovery or execution. -func checkPlugin(url string, pluginProtocolVersion uint) bool { - resp, err := httpClient.Head(url) - if err != nil { - log.Printf("[ERROR] error fetching plugin headers: %s", err) - return false - } - - if resp.StatusCode != http.StatusOK { - log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status) - return false - } - - proto := resp.Header.Get(protocolVersionHeader) - if proto == "" { - // The header isn't present, but we don't make this error fatal since - // the latest version will probably work.
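Putting the URL helpers together (they are unexported, so this would live inside the package; the values are illustrative):

    i := &ProviderInstaller{Dir: "/tmp/plugins"} // OS and Arch default to runtime.GOOS/GOARCH
    fmt.Println(i.providerURL("aws", "1.2.0"))
    // On linux/amd64:
    // https://releases.hashicorp.com/terraform-provider-aws/1.2.0/terraform-provider-aws_1.2.0_linux_amd64.zip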
- log.Printf("[WARNING] missing %s from: %s", protocolVersionHeader, url) - return true - } - - protoVersion, err := strconv.Atoi(proto) - if err != nil { - log.Printf("[ERROR] invalid ProtocolVersion: %s", proto) - return false - } - - return protoVersion == int(pluginProtocolVersion) -} - -// list the version available for the named plugin -func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) { - versions, err := listPluginVersions(i.providerVersionsURL(name)) - if err != nil { - // listPluginVersions returns a verbose error message indicating - // what was being accessed and what failed - return nil, err - } - return versions, nil -} - -var errVersionNotFound = errors.New("version not found") - -// take the list of available versions for a plugin, and filter out those that -// don't fit the constraints. -func allowedVersions(available []Version, required Constraints) []Version { - var allowed []Version - - for _, v := range available { - if required.Allows(v) { - allowed = append(allowed, v) - } - } - - return allowed -} - -// return a list of the plugin versions at the given URL -func listPluginVersions(url string) ([]Version, error) { - resp, err := httpClient.Get(url) - if err != nil { - // http library produces a verbose error message that includes the - // URL being accessed, etc. - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := ioutil.ReadAll(resp.Body) - log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body) - - switch resp.StatusCode { - case http.StatusNotFound, http.StatusForbidden: - // These are treated as indicative of the given name not being - // a valid provider name at all. - return nil, ErrorNoSuchProvider - - default: - // All other errors are assumed to be operational problems. - return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status) - } - - } - - body, err := html.Parse(resp.Body) - if err != nil { - log.Fatal(err) - } - - names := []string{} - - // all we need to do is list links on the directory listing page that look like plugins - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - c := n.FirstChild - if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") { - names = append(names, c.Data) - return - } - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(body) - - return versionsFromNames(names), nil -} - -// parse the list of directory names into a sorted list of available versions -func versionsFromNames(names []string) []Version { - var versions []Version - for _, name := range names { - parts := strings.SplitN(name, "_", 2) - if len(parts) == 2 && parts[1] != "" { - v, err := VersionStr(parts[1]).Parse() - if err != nil { - // filter invalid versions scraped from the page - log.Printf("[WARN] invalid version found for %q: %s", name, err) - continue - } - - versions = append(versions, v) - } - } - - return versions -} - -func checksumForFile(sums []byte, name string) string { - for _, line := range strings.Split(string(sums), "\n") { - parts := strings.Fields(line) - if len(parts) > 1 && parts[1] == name { - return parts[0] - } - } - return "" -} - -// fetch the SHA256SUMS file provided, and verify its signature. 
-func getPluginSHA256SUMs(sumsURL string) ([]byte, error) { - sigURL := sumsURL + ".sig" - - sums, err := getFile(sumsURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums: %s", err) - } - - sig, err := getFile(sigURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums signature: %s", err) - } - - if err := verifySig(sums, sig); err != nil { - return nil, err - } - - return sums, nil -} - -func getFile(url string) ([]byte, error) { - resp, err := httpClient.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return data, err - } - return data, nil -} - -func GetReleaseHost() string { - return releaseHost -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go deleted file mode 100644 index bdcebcb..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go +++ /dev/null @@ -1,41 +0,0 @@ -package discovery - -import ( - "crypto/sha256" - "io" - "os" -) - -// PluginMeta is metadata about a plugin, useful for launching the plugin -// and for understanding which plugins are available. -type PluginMeta struct { - // Name is the name of the plugin, e.g. as inferred from the plugin - // binary's filename, or by explicit configuration. - Name string - - // Version is the semver version of the plugin, expressed as a string - // that might not be semver-valid. - Version VersionStr - - // Path is the absolute path of the executable that can be launched - // to provide the RPC server for this plugin. - Path string -} - -// SHA256 returns a SHA256 hash of the content of the referenced executable -// file, or an error if the file's contents cannot be read. -func (m PluginMeta) SHA256() ([]byte, error) { - f, err := os.Open(m.Path) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go deleted file mode 100644 index 181ea1f..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go +++ /dev/null @@ -1,195 +0,0 @@ -package discovery - -// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. -// -// Methods on this type allow filtering of the set to produce subsets that -// meet more restrictive criteria. -type PluginMetaSet map[PluginMeta]struct{} - -// Add inserts the given PluginMeta into the receiving set. This is a no-op -// if the given meta is already present. -func (s PluginMetaSet) Add(p PluginMeta) { - s[p] = struct{}{} -} - -// Remove removes the given PluginMeta from the receiving set. This is a no-op -// if the given meta is not already present. -func (s PluginMetaSet) Remove(p PluginMeta) { - delete(s, p) -} - -// Has returns true if the given meta is in the receiving set, or false -// otherwise. -func (s PluginMetaSet) Has(p PluginMeta) bool { - _, ok := s[p] - return ok -} - -// Count returns the number of metas in the set -func (s PluginMetaSet) Count() int { - return len(s) -} - -// ValidateVersions returns two new PluginMetaSets, separating those with -// versions that have syntax-valid semver versions from those that don't. 
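PluginMeta.SHA256 produces the raw digest, so hex-encoding it makes it directly comparable against a SHA256SUMS entry. A sketch with a hypothetical path (assumes "encoding/hex" and "log" are imported):

    meta := PluginMeta{
        Name:    "aws",
        Version: "1.2.0",
        Path:    "/tmp/plugins/terraform-provider-aws_v1.2.0",
    }
    digest, err := meta.SHA256()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(hex.EncodeToString(digest)) // comparable to a SHA256SUMS line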
-// -// Eliminating invalid versions from consideration (and possibly warning about -// them) is usually the first step of working with a meta set after discovery -// has completed. -func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { - valid = make(PluginMetaSet) - invalid = make(PluginMetaSet) - for p := range s { - if _, err := p.Version.Parse(); err == nil { - valid.Add(p) - } else { - invalid.Add(p) - } - } - return -} - -// WithName returns the subset of metas that have the given name. -func (s PluginMetaSet) WithName(name string) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - if p.Name == name { - ns.Add(p) - } - } - return ns -} - -// WithVersion returns the subset of metas that have the given version. -// -// This should be used only with the "valid" result from ValidateVersions; -// it will ignore any plugin metas that have a invalid version strings. -func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - gotVersion, err := p.Version.Parse() - if err != nil { - continue - } - if gotVersion.Equal(version) { - ns.Add(p) - } - } - return ns -} - -// ByName groups the metas in the set by their Names, returning a map. -func (s PluginMetaSet) ByName() map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - ret[p.Name].Add(p) - } - return ret -} - -// Newest returns the one item from the set that has the newest Version value. -// -// The result is meaningful only if the set is already filtered such that -// all of the metas have the same Name. -// -// If there isn't at least one meta in the set then this function will panic. -// Use Count() to ensure that there is at least one value before calling. -// -// If any of the metas have invalid version strings then this function will -// panic. Use ValidateVersions() first to filter out metas with invalid -// versions. -// -// If two metas have the same Version then one is arbitrarily chosen. This -// situation should be avoided by pre-filtering the set. -func (s PluginMetaSet) Newest() PluginMeta { - if len(s) == 0 { - panic("can't call NewestStable on empty PluginMetaSet") - } - - var first = true - var winner PluginMeta - var winnerVersion Version - for p := range s { - version, err := p.Version.Parse() - if err != nil { - panic(err) - } - - if first == true || version.NewerThan(winnerVersion) { - winner = p - winnerVersion = version - first = false - } - } - - return winner -} - -// ConstrainVersions takes a set of requirements and attempts to -// return a map from name to a set of metas that have the matching -// name and an appropriate version. -// -// If any of the given requirements match *no* plugins then its PluginMetaSet -// in the returned map will be empty. -// -// All viable metas are returned, so the caller can apply any desired filtering -// to reduce down to a single option. For example, calling Newest() to obtain -// the highest available version. -// -// If any of the metas in the set have invalid version strings then this -// function will panic. Use ValidateVersions() first to filter out metas with -// invalid versions. 
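The methods above compose into the usual discovery pipeline: validate versions first, then filter, then pick. For example:

    all := FindPlugins("provider", []string{"/tmp/plugins"})
    valid, invalid := all.ValidateVersions()
    for p := range invalid {
        log.Printf("[WARN] ignoring plugin with unparseable version: %s", p.Path)
    }
    candidates := valid.WithName("aws")
    if candidates.Count() == 0 {
        log.Fatal("no usable aws plugin found")
    }
    newest := candidates.Newest() // safe: non-empty and versions validated
    fmt.Println(newest.Path)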
-func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - name := p.Name - allowedVersions, ok := reqd[name] - if !ok { - continue - } - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - version, err := p.Version.Parse() - if err != nil { - panic(err) - } - if allowedVersions.Allows(version) { - ret[p.Name].Add(p) - } - } - return ret -} - -// OverridePaths returns a new set where any existing plugins with the given -// names are removed and replaced with the single path given in the map. -// -// This is here only to continue to support the legacy way of overriding -// plugin binaries in the .terraformrc file. It treats all given plugins -// as pre-versioning (version 0.0.0). This mechanism will eventually be -// phased out, with vendor directories being the intended replacement. -func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet { - ret := make(PluginMetaSet) - for p := range s { - if _, ok := paths[p.Name]; ok { - // Skip plugins that we're overridding - continue - } - - ret.Add(p) - } - - // Now add the metadata for overriding plugins - for name, path := range paths { - ret.Add(PluginMeta{ - Name: name, - Version: VersionZero, - Path: path, - }) - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go deleted file mode 100644 index 75430fd..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go +++ /dev/null @@ -1,105 +0,0 @@ -package discovery - -import ( - "bytes" -) - -// PluginRequirements describes a set of plugins (assumed to be of a consistent -// kind) that are required to exist and have versions within the given -// corresponding sets. -type PluginRequirements map[string]*PluginConstraints - -// PluginConstraints represents an element of PluginRequirements describing -// the constraints for a single plugin. -type PluginConstraints struct { - // Specifies that the plugin's version must be within the given - // constraints. - Versions Constraints - - // If non-nil, the hash of the on-disk plugin executable must exactly - // match the SHA256 hash given here. - SHA256 []byte -} - -// Allows returns true if the given version is within the receiver's version -// constraints. -func (s *PluginConstraints) Allows(v Version) bool { - return s.Versions.Allows(v) -} - -// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, -// either because it matches the constraint or because there is no such -// constraint. -func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { - if s.SHA256 == nil { - return true - } - return bytes.Equal(s.SHA256, digest) -} - -// Merge takes the contents of the receiver and the other given requirements -// object and merges them together into a single requirements structure -// that satisfies both sets of requirements. -// -// Note that it doesn't make sense to merge two PluginRequirements with -// differing required plugin SHA256 hashes, since the result will never -// match any plugin. 
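AcceptsSHA256 treats a nil hash as "anything goes", which is what lets version resolution happen before executables are locked. A small sketch (digest is a hypothetical []byte, e.g. from PluginMeta.SHA256):

    cons := &PluginConstraints{
        Versions: ConstraintStr(">= 1.0.0").MustParse(),
    }
    fmt.Println(cons.AcceptsSHA256(digest)) // true: no SHA256 constraint set yet

    cons.SHA256 = digest
    fmt.Println(cons.AcceptsSHA256(digest))          // true: exact match
    fmt.Println(cons.AcceptsSHA256([]byte("other"))) // false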
-func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { - ret := make(PluginRequirements) - for n, c := range r { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - for n, c := range other { - if existing, exists := ret[n]; exists { - ret[n].Versions = ret[n].Versions.Append(c.Versions) - - if existing.SHA256 != nil { - if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { - // If we've been asked to merge two constraints with - // different SHA256 hashes then we'll produce a dummy value - // that can never match anything. This is a silly edge case - // that no reasonable caller should hit. - ret[n].SHA256 = []byte(invalidProviderHash) - } - } else { - ret[n].SHA256 = c.SHA256 // might still be nil - } - } else { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - } - return ret -} - -// LockExecutables applies additional constraints to the receiver that -// require plugin executables with specific SHA256 digests. This modifies -// the receiver in-place, since it's intended to be applied after -// version constraints have been resolved. -// -// The given map must include a key for every plugin that is already -// required. If not, any missing keys will cause the corresponding plugin -// to never match, though the direct caller doesn't necessarily need to -// guarantee this as long as the downstream code _applying_ these constraints -// is able to deal with the non-match in some way. -func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { - for name, cons := range r { - digest := sha256s[name] - - if digest == nil { - // Prevent any match, which will then presumably cause the - // downstream consumer of this requirements to report an error. - cons.SHA256 = []byte(invalidProviderHash) - continue - } - - cons.SHA256 = digest - } -} - -const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go deleted file mode 100644 index b6686a5..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go +++ /dev/null @@ -1,53 +0,0 @@ -package discovery - -import ( - "bytes" - "log" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// Verify the data using the provided openpgp detached signature and the -// embedded hashicorp public key. -func verifySig(data, sig []byte) error { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey)) - if err != nil { - log.Fatal(err) - } - - _, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) - return err -} - -// this is the public key that signs the checksums file for releases. 
-const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go deleted file mode 100644 index 8fad58d..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go +++ /dev/null @@ -1,72 +0,0 @@ -package discovery - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" -) - -const VersionZero = "0.0.0" - -// A VersionStr is a string containing a possibly-invalid representation -// of a semver version number. Call Parse on it to obtain a real Version -// object, or discover that it is invalid. -type VersionStr string - -// Parse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s VersionStr) Parse() (Version, error) { - raw, err := version.NewVersion(string(s)) - if err != nil { - return Version{}, err - } - return Version{raw}, nil -} - -// MustParse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then it panics. -func (s VersionStr) MustParse() Version { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Version represents a version number that has been parsed from -// a semver string and known to be valid. -type Version struct { - // We wrap this here just because it avoids a proliferation of - // direct go-version imports all over the place, and keeps the - // version-processing details within this package. 
- raw *version.Version -} - -func (v Version) String() string { - return v.raw.String() -} - -func (v Version) NewerThan(other Version) bool { - return v.raw.GreaterThan(other.raw) -} - -func (v Version) Equal(other Version) bool { - return v.raw.Equal(other.raw) -} - -// MinorUpgradeConstraintStr returns a ConstraintStr that would permit -// minor upgrades relative to the receiving version. -func (v Version) MinorUpgradeConstraintStr() ConstraintStr { - segments := v.raw.Segments() - return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) -} - -type Versions []Version - -// Sort sorts version from newest to oldest. -func (v Versions) Sort() { - sort.Slice(v, func(i, j int) bool { - return v[i].NewerThan(v[j]) - }) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go deleted file mode 100644 index 0aefd75..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go +++ /dev/null @@ -1,84 +0,0 @@ -package discovery - -import ( - "sort" - - version "github.com/hashicorp/go-version" -) - -// A ConstraintStr is a string containing a possibly-invalid representation -// of a version constraint provided in configuration. Call Parse on it to -// obtain a real Constraint object, or discover that it is invalid. -type ConstraintStr string - -// Parse transforms a ConstraintStr into a Constraints if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s ConstraintStr) Parse() (Constraints, error) { - raw, err := version.NewConstraint(string(s)) - if err != nil { - return Constraints{}, err - } - return Constraints{raw}, nil -} - -// MustParse is like Parse but it panics if the constraint string is invalid. -func (s ConstraintStr) MustParse() Constraints { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Constraints represents a set of versions which any given Version is either -// a member of or not. -type Constraints struct { - raw version.Constraints -} - -// AllVersions is a Constraints containing all versions -var AllVersions Constraints - -func init() { - AllVersions = Constraints{ - raw: make(version.Constraints, 0), - } -} - -// Allows returns true if the given version permitted by the receiving -// constraints set. -func (s Constraints) Allows(v Version) bool { - return s.raw.Check(v.raw) -} - -// Append combines the receiving set with the given other set to produce -// a set that is the intersection of both sets, which is to say that resulting -// constraints contain only the versions that are members of both. -func (s Constraints) Append(other Constraints) Constraints { - raw := make(version.Constraints, 0, len(s.raw)+len(other.raw)) - - // Since "raw" is a list of constraints that remove versions from the set, - // "Intersection" is implemented by concatenating together those lists, - // thus leaving behind only the versions not removed by either list. - raw = append(raw, s.raw...) - raw = append(raw, other.raw...) - - // while the set is unordered, we sort these lexically for consistent output - sort.Slice(raw, func(i, j int) bool { - return raw[i].String() < raw[j].String() - }) - - return Constraints{raw} -} - -// String returns a string representation of the set members as a set -// of range constraints. -func (s Constraints) String() string { - return s.raw.String() -} - -// Unconstrained returns true if and only if the receiver is an empty -// constraint set. 
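Because Append concatenates the underlying go-version constraint lists and Check requires every member to pass, Append is an intersection. For example:

    c := ConstraintStr(">= 1.0.0, < 2.0.0").MustParse()
    v := VersionStr("1.3.0").MustParse()
    fmt.Println(c.Allows(v))           // true
    fmt.Println(AllVersions.Allows(v)) // true: the empty set allows everything

    narrowed := c.Append(ConstraintStr("!= 1.3.0").MustParse())
    fmt.Println(narrowed.Allows(v)) // false: every member constraint must pass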
-func (s Constraints) Unconstrained() bool { - return len(s.raw) == 0 -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go deleted file mode 100644 index 00fa7b2..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go +++ /dev/null @@ -1,13 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" -) - -// See serve.go for serving plugins - -// PluginMap should be used by clients for the map of plugins. -var PluginMap = map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{}, - "provisioner": &ResourceProvisionerPlugin{}, -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go deleted file mode 100644 index 473f786..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ /dev/null @@ -1,578 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProviderPlugin is the plugin.Plugin implementation. -type ResourceProviderPlugin struct { - F func() terraform.ResourceProvider -} - -func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil -} - -func (p *ResourceProviderPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvider{Broker: b, Client: c}, nil -} - -// ResourceProvider is an implementation of terraform.ResourceProvider -// that communicates over RPC. -type ResourceProvider struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvider) Stop() error { - var resp ResourceProviderStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIInputServer{ - UIInput: input, - }) - - var resp ResourceProviderInputResponse - args := ResourceProviderInputArgs{ - InputId: id, - Config: c, - } - - err := p.Client.Call("Plugin.Input", &args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - return nil, err - } - - return resp.Config, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResponse - args := ResourceProviderValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateResource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) 
Configure(c *terraform.ResourceConfig) error { - var resp ResourceProviderConfigureResponse - err := p.Client.Call("Plugin.Configure", c, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderApplyResponse - args := &ResourceProviderApplyArgs{ - Info: info, - State: s, - Diff: d, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderDiffResponse - args := &ResourceProviderDiffArgs{ - Info: info, - State: s, - Config: c, - } - err := p.Client.Call("Plugin.Diff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - var resp ResourceProviderRefreshResponse - args := &ResourceProviderRefreshArgs{ - Info: info, - State: s, - } - - err := p.Client.Call("Plugin.Refresh", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - var resp ResourceProviderImportStateResponse - args := &ResourceProviderImportStateArgs{ - Info: info, - Id: id, - } - - err := p.Client.Call("Plugin.ImportState", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - var result []terraform.ResourceType - - err := p.Client.Call("Plugin.Resources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? 
- return nil - } - - return result -} - -func (p *ResourceProvider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderReadDataDiffResponse - args := &ResourceProviderReadDataDiffArgs{ - Info: info, - Config: c, - } - - err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderReadDataApplyResponse - args := &ResourceProviderReadDataApplyArgs{ - Info: info, - Diff: d, - } - - err := p.Client.Call("Plugin.ReadDataApply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) DataSources() []terraform.DataSource { - var result []terraform.DataSource - - err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) Close() error { - return p.Client.Close() -} - -// ResourceProviderServer is a net/rpc compatible structure for serving -// a ResourceProvider. This should not be used directly. -type ResourceProviderServer struct { - Broker *plugin.MuxBroker - Provider terraform.ResourceProvider -} - -type ResourceProviderStopResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderConfigureResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderInputArgs struct { - InputId uint32 - Config *terraform.ResourceConfig -} - -type ResourceProviderInputResponse struct { - Config *terraform.ResourceConfig - Error *plugin.BasicError -} - -type ResourceProviderApplyArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Diff *terraform.InstanceDiff -} - -type ResourceProviderApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderDiffArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProviderDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderRefreshArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState -} - -type ResourceProviderRefreshResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderImportStateArgs struct { - Info *terraform.InstanceInfo - Id string -} - -type ResourceProviderImportStateResponse struct { - State []*terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataApplyArgs struct { - Info *terraform.InstanceInfo - Diff *terraform.InstanceDiff -} - -type ResourceProviderReadDataApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataDiffArgs struct { - Info *terraform.InstanceInfo - Config *terraform.ResourceConfig -} - -type ResourceProviderReadDataDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProviderValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProviderValidateResourceArgs struct { - Config 
*terraform.ResourceConfig - Type string -} - -type ResourceProviderValidateResourceResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -func (s *ResourceProviderServer) Stop( - _ interface{}, - reply *ResourceProviderStopResponse) error { - err := s.Provider.Stop() - *reply = ResourceProviderStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) Input( - args *ResourceProviderInputArgs, - reply *ResourceProviderInputResponse) error { - conn, err := s.Broker.Dial(args.InputId) - if err != nil { - *reply = ResourceProviderInputResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - input := &UIInput{Client: client} - - config, err := s.Provider.Input(input, args.Config) - *reply = ResourceProviderInputResponse{ - Config: config, - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) Validate( - args *ResourceProviderValidateArgs, - reply *ResourceProviderValidateResponse) error { - warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ValidateResource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) Configure( - config *terraform.ResourceConfig, - reply *ResourceProviderConfigureResponse) error { - err := s.Provider.Configure(config) - *reply = ResourceProviderConfigureResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Apply( - args *ResourceProviderApplyArgs, - result *ResourceProviderApplyResponse) error { - state, err := s.Provider.Apply(args.Info, args.State, args.Diff) - *result = ResourceProviderApplyResponse{ - State: state, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Diff( - args *ResourceProviderDiffArgs, - result *ResourceProviderDiffResponse) error { - diff, err := s.Provider.Diff(args.Info, args.State, args.Config) - *result = ResourceProviderDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Refresh( - args *ResourceProviderRefreshArgs, - result *ResourceProviderRefreshResponse) error { - newState, err := s.Provider.Refresh(args.Info, args.State) - *result = ResourceProviderRefreshResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ImportState( - args *ResourceProviderImportStateArgs, - result *ResourceProviderImportStateResponse) error { - states, err := s.Provider.ImportState(args.Info, args.Id) - *result = ResourceProviderImportStateResponse{ - State: states, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Resources( - nothing interface{}, - result *[]terraform.ResourceType) error { - *result = s.Provider.Resources() - return nil -} - -func (s *ResourceProviderServer) ValidateDataSource( - 
args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ReadDataDiff( - args *ResourceProviderReadDataDiffArgs, - result *ResourceProviderReadDataDiffResponse) error { - diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) - *result = ResourceProviderReadDataDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ReadDataApply( - args *ResourceProviderReadDataApplyArgs, - result *ResourceProviderReadDataApplyResponse) error { - newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) - *result = ResourceProviderReadDataApplyResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) DataSources( - nothing interface{}, - result *[]terraform.DataSource) error { - *result = s.Provider.DataSources() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go deleted file mode 100644 index 8fce9d8..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go +++ /dev/null @@ -1,173 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProvisionerPlugin is the plugin.Plugin implementation. -type ResourceProvisionerPlugin struct { - F func() terraform.ResourceProvisioner -} - -func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil -} - -func (p *ResourceProvisionerPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvisioner{Broker: b, Client: c}, nil -} - -// ResourceProvisioner is an implementation of terraform.ResourceProvisioner -// that communicates over RPC. 
-type ResourceProvisioner struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProvisionerValidateResponse - args := ResourceProvisionerValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvisioner) Apply( - output terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIOutputServer{ - UIOutput: output, - }) - - var resp ResourceProvisionerApplyResponse - args := &ResourceProvisionerApplyArgs{ - OutputId: id, - State: s, - Config: c, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Stop() error { - var resp ResourceProvisionerStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Close() error { - return p.Client.Close() -} - -type ResourceProvisionerValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProvisionerValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProvisionerApplyArgs struct { - OutputId uint32 - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProvisionerApplyResponse struct { - Error *plugin.BasicError -} - -type ResourceProvisionerStopResponse struct { - Error *plugin.BasicError -} - -// ResourceProvisionerServer is a net/rpc compatible structure for serving -// a ResourceProvisioner. This should not be used directly. 
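All of the client methods above share one net/rpc calling convention: fill an args struct, call "Plugin.<Method>", and read both the payload and a *plugin.BasicError back out of a response struct. A minimal, self-contained sketch of that convention using only the standard library (EchoService and its types are hypothetical names, not part of this package):

package main

import (
	"fmt"
	"net"
	"net/rpc"
)

type EchoArgs struct{ Message string }

type EchoResponse struct{ Value string }

type EchoService struct{}

// Echo follows the net/rpc method shape: exported method, args in,
// pointer to a reply struct out, and an error return.
func (s *EchoService) Echo(args *EchoArgs, reply *EchoResponse) error {
	*reply = EchoResponse{Value: args.Message}
	return nil
}

func main() {
	// net.Pipe gives an in-memory connection, so no sockets are needed.
	srvConn, cliConn := net.Pipe()

	server := rpc.NewServer()
	if err := server.Register(&EchoService{}); err != nil {
		panic(err)
	}
	go server.ServeConn(srvConn)

	client := rpc.NewClient(cliConn)
	defer client.Close()

	var resp EchoResponse
	if err := client.Call("EchoService.Echo", &EchoArgs{Message: "hi"}, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Value) // prints "hi"
}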
-type ResourceProvisionerServer struct { - Broker *plugin.MuxBroker - Provisioner terraform.ResourceProvisioner -} - -func (s *ResourceProvisionerServer) Apply( - args *ResourceProvisionerApplyArgs, - result *ResourceProvisionerApplyResponse) error { - conn, err := s.Broker.Dial(args.OutputId) - if err != nil { - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - output := &UIOutput{Client: client} - - err = s.Provisioner.Apply(output, args.State, args.Config) - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProvisionerServer) Validate( - args *ResourceProvisionerValidateArgs, - reply *ResourceProvisionerValidateResponse) error { - warns, errs := s.Provisioner.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProvisionerValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProvisionerServer) Stop( - _ interface{}, - reply *ResourceProvisionerStopResponse) error { - err := s.Provisioner.Stop() - *reply = ResourceProvisionerStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go deleted file mode 100644 index 2028a61..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/serve.go +++ /dev/null @@ -1,54 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// The constants below are the names of the plugins that can be dispensed -// from the plugin server. -const ( - ProviderPluginName = "provider" - ProvisionerPluginName = "provisioner" -) - -// Handshake is the HandshakeConfig used to configure clients and servers. -var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. This should be bumped whenever a change happens in - // one or the other that makes it so that they can't safely communicate. - // This could be adding a new interface value, it could be how - // helper/schema computes diffs, etc. - ProtocolVersion: 4, - - // The magic cookie values should NEVER be changed. - MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type ProviderFunc func() terraform.ResourceProvider -type ProvisionerFunc func() terraform.ResourceProvisioner - -// ServeOpts are the configurations to serve a plugin. -type ServeOpts struct { - ProviderFunc ProviderFunc - ProvisionerFunc ProvisionerFunc -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - Plugins: pluginMap(opts), - }) -} - -// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin -// server or client. 
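serve.go above is the half that runs inside a provider binary. Under the vendored tree being removed here, a pre-0.12 provider's entry point would look roughly like the following sketch; the empty schema.Provider is a stand-in for a real provider definition, not a working provider:

package main

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Serve blocks for the life of the plugin process; Terraform core
	// dispenses the "provider" plugin using the Handshake defined above.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: func() terraform.ResourceProvider {
			// A real provider declares its schema and resources here.
			return &schema.Provider{}
		},
	})
}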
-func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { - return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, - "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, - } -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go deleted file mode 100644 index 493efc0..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go +++ /dev/null @@ -1,51 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// UIInput is an implementation of terraform.UIInput that communicates -// over RPC. -type UIInput struct { - Client *rpc.Client -} - -func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { - var resp UIInputInputResponse - err := i.Client.Call("Plugin.Input", opts, &resp) - if err != nil { - return "", err - } - if resp.Error != nil { - err = resp.Error - return "", err - } - - return resp.Value, nil -} - -type UIInputInputResponse struct { - Value string - Error *plugin.BasicError -} - -// UIInputServer is a net/rpc compatible structure for serving -// a UIInput. This should not be used directly. -type UIInputServer struct { - UIInput terraform.UIInput -} - -func (s *UIInputServer) Input( - opts *terraform.InputOpts, - reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(opts) - *reply = UIInputInputResponse{ - Value: value, - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go deleted file mode 100644 index c222b00..0000000 --- a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/terraform/terraform" -) - -// UIOutput is an implementation of terraform.UIOutput that communicates -// over RPC. -type UIOutput struct { - Client *rpc.Client -} - -func (o *UIOutput) Output(v string) { - o.Client.Call("Plugin.Output", v, new(interface{})) -} - -// UIOutputServer is the RPC server for serving UIOutput. -type UIOutputServer struct { - UIOutput terraform.UIOutput -} - -func (s *UIOutputServer) Output( - v string, - reply *interface{}) error { - s.UIOutput.Output(v) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go deleted file mode 100644 index a814a85..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ /dev/null @@ -1,1048 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sort" - "strings" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/experiment" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeVar asks for all variables - InputModeVar InputMode = 1 << iota - - // InputModeVarUnset asks for variables which are not set yet. - // InputModeVar must be set for this to have an effect. - InputModeVarUnset - - // InputModeProvider asks for provider variables - InputModeProvider - - // InputModeStd is the standard operating mode and asks for both variables - // and providers.
- InputModeStd = InputModeVar | InputModeProvider -) - -var ( - // contextFailOnShadowError will cause Context operations to return - // errors when shadow operations fail. This is only used for testing. - contextFailOnShadowError = false - - // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every - // Plan operation, effectively testing the Diff DeepCopy whenever - // a Plan occurs. This is enabled for tests. - contextTestDeepCopyOnPlan = false -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. -type ContextOpts struct { - Meta *ContextMeta - Destroy bool - Diff *Diff - Hooks []Hook - Module *module.Tree - Parallelism int - State *State - StateFutureAllowed bool - ProviderResolver ResourceProviderResolver - Provisioners map[string]ResourceProvisionerFactory - Shadow bool - Targets []string - Variables map[string]interface{} - - // If non-nil, will apply as additional constraints on the provider - // plugins that will be requested from the provider resolver. - ProviderSHA256s map[string][]byte - SkipProviderVerify bool - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information - // that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. See the documentation for that. -// -// Extra functions on Context can be found in context_*.go files. -type Context struct { - // Maintainer note: Anytime this struct is changed, please verify - // that newShadowContext still does the right thing. Tests should - // fail regardless but putting this note here as well. - - components contextComponentFactory - destroy bool - diff *Diff - diffLock sync.RWMutex - hooks []Hook - meta *ContextMeta - module *module.Tree - sh *stopHook - shadow bool - state *State - stateLock sync.RWMutex - targets []string - uiInput UIInput - variables map[string]interface{} - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]interface{} - providerSHA256s map[string][]byte - runLock sync.Mutex - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc - shadowErr error -} - -// NewContext creates a new Context structure. -// -// Once a Context is created, the pointer values within ContextOpts -// should not be mutated in any way, since the pointers are copied, not -// the values themselves. -func NewContext(opts *ContextOpts) (*Context, error) { - // Validate the version requirement if it is given - if opts.Module != nil { - if err := checkRequiredVersion(opts.Module); err != nil { - return nil, err - } - } - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - state := opts.State - if state == nil { - state = new(State) - state.init() - } - - // If our state is from the future, then error. Callers can avoid - // this error by explicitly setting `StateFutureAllowed`.
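For orientation, this is roughly how a caller assembles these options. A nil Module is accepted by NewContext (there is then no configuration to walk), so the sketch below should compile and run on its own against this vendored tree; real callers pass a *module.Tree plus a provider resolver:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	ctx, err := terraform.NewContext(&terraform.ContextOpts{
		Parallelism: 10, // matches the default NewContext applies when zero
	})
	if err != nil {
		panic(err)
	}

	// Variables can also be adjusted after construction.
	ctx.SetVariable("region", "us-east-1")
	fmt.Println(ctx.Variables()) // map[region:us-east-1]
}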
- if !opts.StateFutureAllowed && state.FromFutureTerraform() { - return nil, fmt.Errorf( - "Terraform doesn't allow running any operations against a state\n"+ - "that was written by a future Terraform version. The state is\n"+ - "reporting it is written by Terraform '%s'.\n\n"+ - "Please run at least that version of Terraform to continue.", - state.TFVersion) - } - - // Explicitly reset our state version to our current version so that - // any operations we do will write out that our latest version - // has run. - state.TFVersion = Version - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - par := opts.Parallelism - if par == 0 { - par = 10 - } - - // Set up the variables in the following sequence: - // 0 - Take default values from the configuration - // 1 - Take values from TF_VAR_x environment variables - // 2 - Take values specified in -var flags, overriding values - // set by environment variables if necessary. This includes - // values taken from -var-file in addition. - variables := make(map[string]interface{}) - if opts.Module != nil { - var err error - variables, err = Variables(opts.Module, opts.Variables) - if err != nil { - return nil, err - } - } - - // Bind available provider plugins to the constraints in config - var providers map[string]ResourceProviderFactory - if opts.ProviderResolver != nil { - var err error - deps := ModuleTreeDependencies(opts.Module, state) - reqd := deps.AllPluginRequirements() - if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { - reqd.LockExecutables(opts.ProviderSHA256s) - } - providers, err = resourceProviderFactories(opts.ProviderResolver, reqd) - if err != nil { - return nil, err - } - } else { - providers = make(map[string]ResourceProviderFactory) - } - - diff := opts.Diff - if diff == nil { - diff = &Diff{} - } - - return &Context{ - components: &basicComponentFactory{ - providers: providers, - provisioners: opts.Provisioners, - }, - destroy: opts.Destroy, - diff: diff, - hooks: hooks, - meta: opts.Meta, - module: opts.Module, - shadow: opts.Shadow, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]interface{}), - providerSHA256s: opts.ProviderSHA256s, - sh: sh, - }, nil -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Graph returns the graph used for the given operation type. -// -// The most extensive or complex graph type is GraphTypePlan. 
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { - if opts == nil { - opts = &ContextGraphOpts{Validate: true} - } - - log.Printf("[INFO] terraform: building graph: %s", typ) - switch typ { - case GraphTypeApply: - return (&ApplyGraphBuilder{ - Module: c.module, - Diff: c.diff, - State: c.state, - Providers: c.components.ResourceProviders(), - Provisioners: c.components.ResourceProvisioners(), - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(RootModulePath) - - case GraphTypeInput: - // The input graph is just a slightly modified plan graph - fallthrough - case GraphTypeValidate: - // The validate graph is just a slightly modified plan graph - fallthrough - case GraphTypePlan: - // Create the plan graph builder - p := &PlanGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, - } - - // Some special cases for other graph types shared with plan currently - var b GraphBuilder = p - switch typ { - case GraphTypeInput: - b = InputGraphBuilder(p) - case GraphTypeValidate: - // We need to set the provisioners so those can be validated - p.Provisioners = c.components.ResourceProvisioners() - - b = ValidateGraphBuilder(p) - } - - return b.Build(RootModulePath) - - case GraphTypePlanDestroy: - return (&DestroyPlanGraphBuilder{ - Module: c.module, - State: c.state, - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) - - case GraphTypeRefresh: - return (&RefreshGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) - } - - return nil, fmt.Errorf("unknown graph type: %s", typ) -} - -// ShadowError returns any errors caught during a shadow operation. -// -// A shadow operation is an operation run in parallel to a real operation -// that performs the same tasks using new logic on copied state. The results -// are compared to ensure that the new logic works the same as the old logic. -// The shadow never affects the real operation or return values. -// -// The results of the shadow operation are only available through this function -// call after a real operation is complete. -// -// For API consumers of Context, you can safely ignore this function -// completely if you have no interest in helping report experimental feature -// errors to Terraform maintainers. Otherwise, please call this function -// after every operation and report this to the user. -// -// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect -// the real state or result of a real operation. They are purely informational -// to assist in future Terraform versions being more stable. Please message -// this effectively to the end user. -// -// This must be called only when no other operation is running (refresh, -// plan, etc.). The result can be used in parallel to any other operation -// running. -func (c *Context) ShadowError() error { - return c.shadowErr -} - -// State returns a copy of the current state associated with this context. -// -// This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *State { - return c.state.DeepCopy() -} - -// Interpolater returns an Interpolater built on a copy of the state -// that can be used to test interpolation values.
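Every operation funnels through this single Graph entry point, so a caller can also build a graph directly, for example to inspect what a plan would walk. A small sketch (ctx is any *terraform.Context built as in the earlier sketch):

package demo

import "github.com/hashicorp/terraform/terraform"

// buildPlanGraph asks the context for a validated plan graph, the same
// request Plan itself makes internally.
func buildPlanGraph(ctx *terraform.Context) (*terraform.Graph, error) {
	return ctx.Graph(terraform.GraphTypePlan, &terraform.ContextGraphOpts{Validate: true})
}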
-func (c *Context) Interpolater() *Interpolater { - var varLock sync.Mutex - var stateLock sync.RWMutex - return &Interpolater{ - Operation: walkApply, - Meta: c.meta, - Module: c.module, - State: c.state.DeepCopy(), - StateLock: &stateLock, - VariableValues: c.variables, - VariableValuesLock: &varLock, - } -} - -// Input asks for input to fill variables and provider configurations. -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) error { - defer c.acquireRun("input")() - - if mode&InputModeVar != 0 { - // Walk the variables first for the root module. We walk them in - // alphabetical order for UX reasons. - rootConf := c.module.Config() - names := make([]string, len(rootConf.Variables)) - m := make(map[string]*config.Variable) - for i, v := range rootConf.Variables { - names[i] = v.Name - m[v.Name] = v - } - sort.Strings(names) - for _, n := range names { - // If we only care about unset variables, then if the variable - // is set, continue on. - if mode&InputModeVarUnset != 0 { - if _, ok := c.variables[n]; ok { - continue - } - } - - var valueType config.VariableType - - v := m[n] - switch valueType = v.Type(); valueType { - case config.VariableTypeUnknown: - continue - case config.VariableTypeMap: - // OK - case config.VariableTypeList: - // OK - case config.VariableTypeString: - // OK - default: - panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) - } - - // If the variable is not already set, and the variable defines a - // default, use that for the value. - if _, ok := c.variables[n]; !ok { - if v.Default != nil { - c.variables[n] = v.Default.(string) - continue - } - } - - // this should only happen during tests - if c.uiInput == nil { - log.Println("[WARN] Context.uiInput is nil") - continue - } - - // Ask the user for a value for this variable - var value string - retry := 0 - for { - var err error - value, err = c.uiInput.Input(&InputOpts{ - Id: fmt.Sprintf("var.%s", n), - Query: fmt.Sprintf("var.%s", n), - Description: v.Description, - }) - if err != nil { - return fmt.Errorf( - "Error asking for %s: %s", n, err) - } - - if value == "" && v.Required() { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - return fmt.Errorf("missing required value for %q", n) - } - retry++ - continue - } - - break - } - - // no value provided, so don't set the variable at all - if value == "" { - continue - } - - decoded, err := parseVariableAsHCL(n, value, valueType) - if err != nil { - return err - } - - if decoded != nil { - c.variables[n] = decoded - } - } - } - - if mode&InputModeProvider != 0 { - // Build the graph - graph, err := c.Graph(GraphTypeInput, nil) - if err != nil { - return err - } - - // Do the walk - if _, err := c.walk(graph, nil, walkInput); err != nil { - return err - } - } - - return nil -} - -// Apply applies the changes represented by this context and returns -// the resulting state. -// -// Even in the case an error is returned, the state may be returned and will -// potentially be partially updated. In addition to returning the resulting -// state, this context is updated with the latest state. -// -// If the state is required after an error, the caller should call -// Context.State, rather than rely on the return value. -// -// TODO: Apply and Refresh should either always return a state, or rely on the -// State() method.
Currently the helper/resource testing framework relies -// on the absence of a returned state to determine if Destroy can be -// called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*State, error) { - defer c.acquireRun("apply")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. - graph, err := c.Graph(GraphTypeApply, nil) - if err != nil { - return nil, err - } - - // Determine the operation - operation := walkApply - if c.destroy { - operation = walkDestroy - } - - // Walk the graph - walker, err := c.walk(graph, graph, operation) - if len(walker.ValidationErrors) > 0 { - err = multierror.Append(err, walker.ValidationErrors...) - } - - // Clean out any unused things - c.state.prune() - - return c.state, err -} - -// Plan generates an execution plan for the given context. -// -// The execution plan encapsulates the context and can be stored -// in order to reinstantiate a context later for Apply. -// -// Plan also updates the diff of this context to be the diff generated -// by the plan, so Apply can be called after. -func (c *Context) Plan() (*Plan, error) { - defer c.acquireRun("plan")() - - p := &Plan{ - Module: c.module, - Vars: c.variables, - State: c.state, - Targets: c.targets, - - TerraformVersion: VersionString(), - ProviderSHA256s: c.providerSHA256s, - } - - var operation walkOperation - if c.destroy { - operation = walkPlanDestroy - } else { - // Set our state to be something temporary. We do this so that - // the plan can update a fake state so that variables work, then - // we replace it back with our old state. - old := c.state - if old == nil { - c.state = &State{} - c.state.init() - } else { - c.state = old.DeepCopy() - } - defer func() { - c.state = old - }() - - operation = walkPlan - } - - // Setup our diff - c.diffLock.Lock() - c.diff = new(Diff) - c.diff.init() - c.diffLock.Unlock() - - // Build the graph. - graphType := GraphTypePlan - if c.destroy { - graphType = GraphTypePlanDestroy - } - graph, err := c.Graph(graphType, nil) - if err != nil { - return nil, err - } - - // Do the walk - walker, err := c.walk(graph, graph, operation) - if err != nil { - return nil, err - } - p.Diff = c.diff - - // If this is true, it means we're running unit tests. In this case, - // we perform a deep copy just to ensure that all context tests also - // test that a diff is copy-able. This will panic if it fails. This - // is enabled during unit tests. - // - // This should never be true during production usage, but even if it is, - // it can't do any real harm. - if contextTestDeepCopyOnPlan { - p.Diff.DeepCopy() - } - - /* - // We don't do the reverification during the new destroy plan because - // it will use a different apply process. - if X_legacyGraph { - // Now that we have a diff, we can build the exact graph that Apply will use - // and catch any possible cycles during the Plan phase. - if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { - return nil, err - } - } - */ - - var errs error - if len(walker.ValidationErrors) > 0 { - errs = multierror.Append(errs, walker.ValidationErrors...) - } - return p, errs -} - -// Refresh goes through all the resources in the state and refreshes them -// to their latest state. This will update the state that this context -// works with, along with returning it. -// -// Even in the case an error is returned, the state may be returned and -// will potentially be partially updated. 
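Plan stores its diff on the Context, which is what a later Apply consumes, so the usual caller-side lifecycle is refresh, plan, then apply. A sketch of that sequence:

package demo

import "github.com/hashicorp/terraform/terraform"

// planAndApply runs the standard operation sequence against one context.
func planAndApply(ctx *terraform.Context) (*terraform.State, error) {
	if _, err := ctx.Refresh(); err != nil {
		return nil, err
	}
	if _, err := ctx.Plan(); err != nil {
		return nil, err
	}
	// Apply consumes the diff the Plan call left on the context.
	return ctx.Apply()
}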
-func (c *Context) Refresh() (*State, error) { - defer c.acquireRun("refresh")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. - graph, err := c.Graph(GraphTypeRefresh, nil) - if err != nil { - return nil, err - } - - // Do the walk - if _, err := c.walk(graph, graph, walkRefresh); err != nil { - return nil, err - } - - // Clean out any unused things - c.state.prune() - - return c.state, nil -} - -// Stop stops the running task. -// -// Stop will block until the task completes. -func (c *Context) Stop() { - log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") - - c.l.Lock() - defer c.l.Unlock() - - // If we're running, then stop - if c.runContextCancel != nil { - log.Printf("[WARN] terraform: run context exists, stopping") - - // Tell the hook we want to stop - c.sh.Stop() - - // Stop the context - c.runContextCancel() - c.runContextCancel = nil - } - - // Grab the condition var before we exit - if cond := c.runCond; cond != nil { - cond.Wait() - } - - log.Printf("[WARN] terraform: stop complete") -} - -// Validate validates the configuration and returns any warnings or errors. -func (c *Context) Validate() ([]string, []error) { - defer c.acquireRun("validate")() - - var errs error - - // Validate the configuration itself - if err := c.module.Validate(); err != nil { - errs = multierror.Append(errs, err) - } - - // This only needs to be done for the root module, since inter-module - // variables are validated in the module tree. - if config := c.module.Config(); config != nil { - // Validate the user variables - if err := smcUserVariables(config, c.variables); len(err) > 0 { - errs = multierror.Append(errs, err...) - } - } - - // If we have errors at this point, the graphing has no chance, - // so just bail early. - if errs != nil { - return nil, []error{errs} - } - - // Build the graph so we can walk it and run Validate on nodes. - // We also validate the graph generated here, but this graph doesn't - // necessarily match the graph that Plan will generate, so we'll validate the - // graph again later after Planning. - graph, err := c.Graph(GraphTypeValidate, nil) - if err != nil { - return nil, []error{err} - } - - // Walk - walker, err := c.walk(graph, graph, walkValidate) - if err != nil { - return nil, multierror.Append(errs, err).Errors - } - - // Return the result - rerrs := multierror.Append(errs, walker.ValidationErrors...) - - sort.Strings(walker.ValidationWarnings) - sort.Slice(rerrs.Errors, func(i, j int) bool { - return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() - }) - - return walker.ValidationWarnings, rerrs.Errors -} - -// Module returns the module tree associated with this context. -func (c *Context) Module() *module.Tree { - return c.module -} - -// Variables will return the mapping of variables that were defined -// for this Context. If Input was called, this mapping may be different -// than what was given. -func (c *Context) Variables() map[string]interface{} { - return c.variables -} - -// SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v interface{}) { - c.variables[k] = v -} - -func (c *Context) acquireRun(phase string) func() { - // With the run lock held, grab the context lock to make changes - // to the run context. 
- c.l.Lock() - defer c.l.Unlock() - - // Wait until we're no longer running - for c.runCond != nil { - c.runCond.Wait() - } - - // Build our lock - c.runCond = sync.NewCond(&c.l) - - // Setup debugging - dbug.SetPhase(phase) - - // Create a new run context - c.runContext, c.runContextCancel = context.WithCancel(context.Background()) - - // Reset the stop hook so we're not stopped - c.sh.Reset() - - // Reset the shadow errors - c.shadowErr = nil - - return c.releaseRun -} - -func (c *Context) releaseRun() { - // Grab the context lock so that we can make modifications to fields - c.l.Lock() - defer c.l.Unlock() - - // setting the phase to "INVALID" lets us easily detect if we have - // operations happening outside of a run, or we missed setting the proper - // phase - dbug.SetPhase("INVALID") - - // End our run. We check if runContext is non-nil because it can be - // set to nil if it was cancelled via Stop() - if c.runContextCancel != nil { - c.runContextCancel() - } - - // Unlock everyone waiting on our condition - cond := c.runCond - c.runCond = nil - cond.Broadcast() - - // Unset the context - c.runContext = nil -} - -func (c *Context) walk( - graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { - // Keep track of the "real" context which is the context that does - // the real work: talking to real providers, modifying real state, etc. - realCtx := c - - // If we don't want shadowing, remove it - if !experiment.Enabled(experiment.X_shadow) { - shadow = nil - } - - // Just log this so we can see it in a debug log - if !c.shadow { - log.Printf("[WARN] terraform: shadow graph disabled") - shadow = nil - } - - // If we have a shadow graph, walk that as well - var shadowCtx *Context - var shadowCloser Shadow - if shadow != nil { - // Build the shadow context. In the process, override the real context - // with the one that is wrapped so that the shadow context can verify - // the results of the real. - realCtx, shadowCtx, shadowCloser = newShadowContext(c) - } - - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - - walker := &ContextGraphWalker{ - Context: realCtx, - Operation: operation, - StopContext: c.runContext, - } - - // Watch for a stop so we can call the provider Stop() API. - watchStop, watchWait := c.watchStop(walker) - - // Walk the real graph, this will block until it completes - realErr := graph.Walk(walker) - - // Close the channel so the watcher stops, and wait for it to return. - close(watchStop) - <-watchWait - - // If we have a shadow graph and we interrupted the real graph, then - // we just close the shadow and never verify it. It is non-trivial to - // recreate the exact execution state up until an interruption so this - // isn't supported with shadows at the moment. - if shadowCloser != nil && c.sh.Stopped() { - // Ignore the error result, there is nothing we could care about - shadowCloser.CloseShadow() - - // Set it to nil so we don't do anything - shadowCloser = nil - } - - // If we have a shadow graph, wait for that to complete. - if shadowCloser != nil { - // Build the graph walker for the shadow. We also wrap this in - // a panicwrap so that panics are captured. For the shadow graph, - // we just want panics to be normal errors rather than to crash - // Terraform. - shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{ - Context: shadowCtx, - Operation: operation, - }) - - // Kick off the shadow walk. This will block on any operations - // on the real walk so it is fine to start first.
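acquireRun and releaseRun above implement a single-runner gate: only one operation (plan, apply, refresh, and so on) may hold the Context at a time, and Stop waits on the same condition variable. The pattern in isolation, as a runnable sketch with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// gate serializes operations the way Context.acquireRun does: a condition
// variable doubles as the "running" flag, and waiters block until it clears.
type gate struct {
	mu   sync.Mutex
	cond *sync.Cond // non-nil while an operation is running
}

func (g *gate) acquire() (release func()) {
	g.mu.Lock()
	defer g.mu.Unlock()

	// Wait until no other operation is running. Wait atomically unlocks
	// g.mu while blocked and relocks it before returning.
	for g.cond != nil {
		g.cond.Wait()
	}
	g.cond = sync.NewCond(&g.mu)
	return g.release
}

func (g *gate) release() {
	g.mu.Lock()
	defer g.mu.Unlock()

	cond := g.cond
	g.cond = nil
	cond.Broadcast() // wake everyone waiting to acquire
}

func main() {
	var g gate
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			done := g.acquire()
			defer done()
			fmt.Println("operation", n, "holds the gate")
		}(i)
	}
	wg.Wait()
}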
- log.Printf("[INFO] Starting shadow graph walk: %s", operation.String()) - shadowCh := make(chan error) - go func() { - shadowCh <- shadow.Walk(shadowWalker) - }() - - // Notify the shadow that we're done - if err := shadowCloser.CloseShadow(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Wait for the walk to end - log.Printf("[DEBUG] Waiting for shadow graph to complete...") - shadowWalkErr := <-shadowCh - - // Get any shadow errors - if err := shadowCloser.ShadowError(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Verify the contexts (compare) - if err := shadowContextVerify(realCtx, shadowCtx); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // At this point, if we're supposed to fail on error, then - // we PANIC. Some tests just verify that there is an error, - // so simply appending it to realErr and returning could hide - // shadow problems. - // - // This must be done BEFORE appending shadowWalkErr since the - // shadowWalkErr may include expected errors. - // - // We only do this if we don't have a real error. In the case of - // a real error, we can't guarantee what nodes were and weren't - // traversed in parallel scenarios so we can't guarantee no - // shadow errors. - if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { - panic(multierror.Prefix(c.shadowErr, "shadow graph:")) - } - - // Now, if we have a walk error, we append that through - if shadowWalkErr != nil { - c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) - } - - if c.shadowErr == nil { - log.Printf("[INFO] Shadow graph success!") - } else { - log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) - - // If we're supposed to fail on shadow errors, then report it - if contextFailOnShadowError { - realErr = multierror.Append(realErr, multierror.Prefix( - c.shadowErr, "shadow graph:")) - } - } - } - - return walker, realErr -} - -// watchStop immediately returns a `stop` and a `wait` chan after dispatching -// the watchStop goroutine. This will watch the runContext for cancellation and -// stop the providers accordingly. When the watch is no longer needed, the -// `stop` chan should be closed before waiting on the `wait` chan. -// The `wait` chan is important, because without synchronizing with the end of -// the watchStop goroutine, the runContext may also be closed during the select -// incorrectly causing providers to be stopped. Even if the graph walk is done -// at that point, stopping a provider permanently cancels its StopContext which -// can cause later actions to fail. -func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { - stop := make(chan struct{}) - wait := make(chan struct{}) - - // get the runContext cancellation channel now, because releaseRun will - // write to the runContext field. - done := c.runContext.Done() - - go func() { - defer close(wait) - // Wait for a stop or completion - select { - case <-done: - // done means the context was canceled, so we need to try and stop - // providers. - case <-stop: - // our own stop channel was closed. - return - } - - // If we're here, we're stopped, trigger the call. - - { - // Copy the providers so that a misbehaved blocking Stop doesn't - // completely hang Terraform. 
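The watcher goroutine that watchStop starts reduces to a small reusable shape: select on either a cancellation signal or a "no longer needed" channel, and synchronize shutdown through a wait channel. A runnable sketch under that reading (watch and its parameters are hypothetical names):

package main

import (
	"context"
	"fmt"
	"time"
)

// watch mirrors watchStop's contract: close stop when the watcher is no
// longer needed, then receive from wait before touching shared state.
func watch(done <-chan struct{}, onCancel func()) (stop chan struct{}, wait <-chan struct{}) {
	stop = make(chan struct{})
	w := make(chan struct{})
	go func() {
		defer close(w)
		select {
		case <-done:
			onCancel() // cancellation observed: trigger the advisory stop
		case <-stop:
			// caller finished normally; nothing to do
		}
	}()
	return stop, w
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stop, wait := watch(ctx.Done(), func() { fmt.Println("stopping providers") })

	cancel()                          // simulate an interrupt
	time.Sleep(10 * time.Millisecond) // let the watcher observe it
	close(stop)                       // still required by the contract even if the watcher already exited
	<-wait                            // synchronize with watcher exit
}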
- walker.providerLock.Lock() - ps := make([]ResourceProvider, 0, len(walker.providerCache)) - for _, p := range walker.providerCache { - ps = append(ps, p) - } - defer walker.providerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - - { - // Call stop on all the provisioners - walker.provisionerLock.Lock() - ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) - for _, p := range walker.provisionerCache { - ps = append(ps, p) - } - defer walker.provisionerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - }() - - return stop, wait -} - -// parseVariableAsHCL parses the value of a single variable as would have been specified -// on the command line via -var or in an environment variable named TF_VAR_x, where x is -// the name of the variable. In order to get around the restriction of HCL requiring a -// top level object, we prepend a sentinel key, decode the user-specified value as its -// value and pull the value back out of the resulting map. -func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) { - // expecting a string so don't decode anything, just strip quotes - if targetType == config.VariableTypeString { - return strings.Trim(input, `"`), nil - } - - // return empty types - if strings.TrimSpace(input) == "" { - switch targetType { - case config.VariableTypeList: - return []interface{}{}, nil - case config.VariableTypeMap: - return make(map[string]interface{}), nil - } - } - - const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY" - inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input) - - var decoded map[string]interface{} - err := hcl.Decode(&decoded, inputWithSentinel) - if err != nil { - return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err) - } - - if len(decoded) != 1 { - return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input) - } - - parsedValue, ok := decoded[sentinelValue] - if !ok { - return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) - } - - switch targetType { - case config.VariableTypeList: - return parsedValue, nil - case config.VariableTypeMap: - if list, ok := parsedValue.([]map[string]interface{}); ok { - return list[0], nil - } - - return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) - default: - panic(fmt.Errorf("unknown type %s", targetType.Printable())) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go deleted file mode 100644 index 6f50744..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go +++ /dev/null @@ -1,65 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// contextComponentFactory is the interface that Context uses -// to initialize various components such as providers and provisioners.
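parseVariableAsHCL above works around HCL's requirement that input be a top-level object by prefixing a sentinel key. The trick in isolation, assuming the vendored github.com/hashicorp/hcl package and an arbitrary key name:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// decodeBareValue wraps a bare HCL value in "v = ..." so hcl.Decode sees a
// top-level object, then pulls the value back out of the decoded map.
func decodeBareValue(input string) (interface{}, error) {
	var decoded map[string]interface{}
	if err := hcl.Decode(&decoded, "v = "+input); err != nil {
		return nil, err
	}
	return decoded["v"], nil
}

func main() {
	v, err := decodeBareValue(`["a", "b"]`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // a []interface{} holding "a" and "b"
}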
-// This factory gets more information than the raw maps used to initialize -// a Context. This information is used for debugging. -type contextComponentFactory interface { - // ResourceProvider creates a new ResourceProvider with the given - // type. The "uid" is a unique identifier for this provider being - // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (ResourceProvider, error) - ResourceProviders() []string - - // ResourceProvisioner creates a new ResourceProvisioner with the - // given type. The "uid" is a unique identifier for this provisioner - // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) - ResourceProvisioners() []string -} - -// basicComponentFactory just calls a factory from a map directly. -type basicComponentFactory struct { - providers map[string]ResourceProviderFactory - provisioners map[string]ResourceProvisionerFactory -} - -func (c *basicComponentFactory) ResourceProviders() []string { - result := make([]string, 0, len(c.providers)) - for k := range c.providers { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvisioners() []string { - result := make([]string, 0, len(c.provisioners)) - for k := range c.provisioners { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { - f, ok := c.providers[typ] - if !ok { - return nil, fmt.Errorf("unknown provider %q", typ) - } - - return f() -} - -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { - f, ok := c.provisioners[typ] - if !ok { - return nil, fmt.Errorf("unknown provisioner %q", typ) - } - - return f() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go deleted file mode 100644 index 084f010..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -//go:generate stringer -type=GraphType context_graph_type.go - -// GraphType is an enum of the type of graph to create with a Context. -// The values of the constants may change so they shouldn't be depended on; -// always use the constant name. -type GraphType byte - -const ( - GraphTypeInvalid GraphType = 0 - GraphTypeLegacy GraphType = iota - GraphTypeRefresh - GraphTypePlan - GraphTypePlanDestroy - GraphTypeApply - GraphTypeInput - GraphTypeValidate -) - -// GraphTypeMap is a mapping of human-readable string to GraphType. This -// is useful to use as the mechanism for human input for configurable -// graph types. -var GraphTypeMap = map[string]GraphType{ - "apply": GraphTypeApply, - "input": GraphTypeInput, - "plan": GraphTypePlan, - "plan-destroy": GraphTypePlanDestroy, - "refresh": GraphTypeRefresh, - "legacy": GraphTypeLegacy, - "validate": GraphTypeValidate, -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go deleted file mode 100644 index f1d5776..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" -) - -// ImportOpts are used as the configuration for Import.
-type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget - - // Module is optional, and specifies a config module that is loaded - // into the graph and evaluated. The use case for this is to provide - // provider configuration. - Module *module.Tree -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the full resource address of the resource to import. - // Example: "module.foo.aws_instance.bar" - Addr string - - // ID is the ID of the resource to import. This is resource-specific. - ID string - - // Provider string - Provider string -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(opts *ImportOpts) (*State, error) { - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // If no module is given, default to the module configured with - // the Context. - module := opts.Module - if module == nil { - module = c.module - } - - // Initialize our graph builder - builder := &ImportGraphBuilder{ - ImportTargets: opts.Targets, - Module: module, - Providers: c.components.ResourceProviders(), - } - - // Build the graph! - graph, err := builder.Build(RootModulePath) - if err != nil { - return c.state, err - } - - // Walk it - if _, err := c.walk(graph, nil, walkImport); err != nil { - return c.state, err - } - - // Clean the state - c.state.prune() - - return c.state, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go deleted file mode 100644 index 265339f..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/debug.go +++ /dev/null @@ -1,523 +0,0 @@ -package terraform - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "time" -) - -// DebugInfo is the global handler for writing the debug archive. All methods -// are safe to call concurrently. Setting DebugInfo to nil will disable writing -// the debug archive. All methods are safe to call on the nil value. -var dbug *debugInfo - -// SetDebugInfo initializes the debug handler with a backing file in the -// provided directory. This must be called before any other terraform package -// operations or not at all. Once this is called, CloseDebugInfo should be -// called before program exit. -func SetDebugInfo(path string) error { - if os.Getenv("TF_DEBUG") == "" { - return nil - } - - di, err := newDebugInfoFile(path) - if err != nil { - return err - } - - dbug = di - return nil -} - -// CloseDebugInfo is the exported interface to Close the debug info handler. -// The debug handler needs to be closed before program exit, so we export this -// function to be deferred in the appropriate entrypoint for our executable. -func CloseDebugInfo() error { - return dbug.Close() -} - -// newDebugInfoFile initializes the global debug handler with a backing file in -// the provided directory.
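A sketch of driving Import from the caller's side; the address and ID are hypothetical values, and ctx must be a Context whose provider resolver can supply the relevant provider:

package demo

import "github.com/hashicorp/terraform/terraform"

// importOne brings a single pre-existing resource under management.
func importOne(ctx *terraform.Context) (*terraform.State, error) {
	return ctx.Import(&terraform.ImportOpts{
		Targets: []*terraform.ImportTarget{
			{
				// Both values are hypothetical; Addr must name a resource
				// in the configuration, and ID is provider-specific.
				Addr: "aws_instance.example",
				ID:   "i-0123456789abcdef0",
			},
		},
	})
}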
-func newDebugInfoFile(dir string) (*debugInfo, error) { - err := os.MkdirAll(dir, 0755) - if err != nil { - return nil, err - } - - // FIXME: not guaranteed unique, but good enough for now - name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999")) - archivePath := filepath.Join(dir, name+".tar.gz") - - f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) - if err != nil { - return nil, err - } - return newDebugInfo(name, f) -} - -// newDebugInfo initializes the global debug handler. -func newDebugInfo(name string, w io.Writer) (*debugInfo, error) { - gz := gzip.NewWriter(w) - - d := &debugInfo{ - name: name, - w: w, - gz: gz, - tar: tar.NewWriter(gz), - } - - // create the subdirs we need - topHdr := &tar.Header{ - Name: name, - Typeflag: tar.TypeDir, - Mode: 0755, - } - graphsHdr := &tar.Header{ - Name: name + "/graphs", - Typeflag: tar.TypeDir, - Mode: 0755, - } - err := d.tar.WriteHeader(topHdr) - // if the first errors, the second will too - err = d.tar.WriteHeader(graphsHdr) - if err != nil { - return nil, err - } - - return d, nil -} - -// debugInfo provides various methods for writing debug information to a -// central archive. The debugInfo struct should be initialized once before any -// output is written, and Close should be called before program exit. All -// exported methods on debugInfo will be safe for concurrent use. The exported -// methods are also all safe to call on a nil pointer, so that there is no need -// for conditional blocks before writing debug information. -// -// Each write operation done by the debugInfo will flush the gzip.Writer and -// tar.Writer, and call Sync() or Flush() on the output writer as needed. This -// ensures that as much data as possible is written to storage in the event of -// a crash. The append format of the tar file, and the stream format of the -// gzip writer allow easy recovery of the data in the event that the debugInfo -// is not closed before program exit. -type debugInfo struct { - sync.Mutex - - // archive root directory name - name string - - // current operation phase - phase string - - // step is a monotonic counter for recording the order of operations - step int - - // flag to protect Close() - closed bool - - // the debug log output is in a tar.gz format, written to the io.Writer w - w io.Writer - gz *gzip.Writer - tar *tar.Writer -} - -// Set the name of the current operational phase in the debug handler. Each file -// in the archive will contain the name of the phase in which it was created, -// i.e. "input", "apply", "plan", "refresh", "validate" -func (d *debugInfo) SetPhase(phase string) { - if d == nil { - return - } - d.Lock() - defer d.Unlock() - - d.phase = phase -} - -// Close the debugInfo, finalizing the data in storage. This closes the -// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is -// also closed. -func (d *debugInfo) Close() error { - if d == nil { - return nil - } - - d.Lock() - defer d.Unlock() - - if d.closed { - return nil - } - d.closed = true - - d.tar.Close() - d.gz.Close() - - if c, ok := d.w.(io.Closer); ok { - return c.Close() - } - return nil -} - -// debug buffer is an io.WriteCloser that will write itself to the debug -// archive when closed.
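The archive plumbing in newDebugInfo is plain archive/tar nested inside compress/gzip. A standalone sketch of the same layering, writing one directory header and one file entry (file names here are arbitrary):

package main

import (
	"archive/tar"
	"compress/gzip"
	"os"
)

func main() {
	f, err := os.Create("debug-example.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz := gzip.NewWriter(f)
	defer gz.Close()
	tw := tar.NewWriter(gz) // tar stream nested inside the gzip stream
	defer tw.Close()

	// Directory entry, mirroring the "name/" headers newDebugInfo writes.
	if err := tw.WriteHeader(&tar.Header{Name: "debug-example", Typeflag: tar.TypeDir, Mode: 0755}); err != nil {
		panic(err)
	}

	// A file entry: header with the size first, then the bytes themselves.
	data := []byte("hello\n")
	if err := tw.WriteHeader(&tar.Header{Name: "debug-example/hello.txt", Mode: 0644, Size: int64(len(data))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(data); err != nil {
		panic(err)
	}
}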
-type debugBuffer struct { - debugInfo *debugInfo - name string - buf bytes.Buffer -} - -func (b *debugBuffer) Write(d []byte) (int, error) { - return b.buf.Write(d) -} - -func (b *debugBuffer) Close() error { - return b.debugInfo.WriteFile(b.name, b.buf.Bytes()) -} - -// the standard library only provides a noop ReadCloser, so define a noop WriteCloser here -type nopWriteCloser struct{} - -func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil } -func (nopWriteCloser) Close() error { return nil } - -// NewFileWriter returns an io.WriteCloser that will be buffered and written to -// the debug archive when closed. -func (d *debugInfo) NewFileWriter(name string) io.WriteCloser { - if d == nil { - return nopWriteCloser{} - } - - return &debugBuffer{ - debugInfo: d, - name: name, - } -} - -type syncer interface { - Sync() error -} - -type flusher interface { - Flush() error -} - -// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called -// on the output writer if they are available. -func (d *debugInfo) flush() { - d.tar.Flush() - d.gz.Flush() - - if f, ok := d.w.(flusher); ok { - f.Flush() - } - - if s, ok := d.w.(syncer); ok { - s.Sync() - } -} - -// WriteFile writes data as a single file to the debug archive. -func (d *debugInfo) WriteFile(name string, data []byte) error { - if d == nil { - return nil - } - - d.Lock() - defer d.Unlock() - return d.writeFile(name, data) -} - -func (d *debugInfo) writeFile(name string, data []byte) error { - defer d.flush() - path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name) - d.step++ - - hdr := &tar.Header{ - Name: path, - Mode: 0644, - Size: int64(len(data)), - } - err := d.tar.WriteHeader(hdr) - if err != nil { - return err - } - - _, err = d.tar.Write(data) - return err -} - -// DebugHook implements all methods of the terraform.Hook interface, and writes -// the arguments to a file in the archive. When a suitable format for the -// argument isn't available, the argument is encoded using json.Marshal. If the -// debug handler is nil, all DebugHook methods are noop, so no time is spent in -// marshaling the data structures.
-type DebugHook struct{} - -func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String() + "\n") - } - - idCopy, err := id.Copy() - if err != nil { - return HookActionContinue, err - } - js, err := json.MarshalIndent(idCopy, "", " ") - if err != nil { - return HookActionContinue, err - } - buf.Write(js) - - dbug.WriteFile("hook-PreApply", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String() + "\n") - } - - if err != nil { - buf.WriteString(err.Error()) - } - - dbug.WriteFile("hook-PostApply", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreDiff", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - idCopy, err := id.Copy() - if err != nil { - return HookActionContinue, err - } - js, err := json.MarshalIndent(idCopy, "", " ") - if err != nil { - return HookActionContinue, err - } - buf.Write(js) - - dbug.WriteFile("hook-PostDiff", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreProvisionResource", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PostProvisionResource", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PreProvision", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PostProvision", buf.Bytes()) - return HookActionContinue, nil -} - 
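Since the comment above says DebugHook satisfies the full terraform.Hook interface, wiring it into a run is just another entry in ContextOpts.Hooks. A sketch:

package demo

import "github.com/hashicorp/terraform/terraform"

// newDebuggableContext attaches the DebugHook so each lifecycle event is
// captured into the debug archive (when SetDebugInfo has enabled one).
func newDebuggableContext(opts *terraform.ContextOpts) (*terraform.Context, error) {
	opts.Hooks = append(opts.Hooks, &terraform.DebugHook{})
	return terraform.NewContext(opts)
}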
-func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) { - if dbug == nil { - return - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s1 + "\n") - buf.WriteString(s2 + "\n") - - dbug.WriteFile("hook-ProvisionOutput", buf.Bytes()) -} - -func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreRefresh", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PostRefresh", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PreImportState", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - for _, is := range iss { - if is != nil { - buf.WriteString(is.String() + "\n") - } - } - dbug.WriteFile("hook-PostImportState", buf.Bytes()) - return HookActionContinue, nil -} - -// skip logging this for now, since it could be huge -func (*DebugHook) PostStateUpdate(*State) (HookAction, error) { - return HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go deleted file mode 100644 index fd1687e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ /dev/null @@ -1,866 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "reflect" - "regexp" - "sort" - "strings" - "sync" - - "github.com/mitchellh/copystructure" -) - -// DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte - -const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate -) - -// multiVal matches the index key to a flatmapped set, list or map -var multiVal = regexp.MustCompile(`\.(#|%)$`) - -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. -type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. -// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. 
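Prune's contract of being "safe to call on a nil Diff" relies on Go's nil-receiver pattern: a method on a pointer receiver can run with a nil receiver as long as it checks before dereferencing. The pattern in isolation, with a hypothetical type:

package main

import "fmt"

type set struct{ items []string }

// Len treats a nil *set as empty, the same way Diff methods treat a nil *Diff.
func (s *set) Len() int {
	if s == nil {
		return 0
	}
	return len(s.items)
}

func main() {
	var s *set              // nil pointer, no allocation
	fmt.Println(s.Len())    // prints 0 instead of panicking
}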
-func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path []string) *ModuleDiff { - m := &ModuleDiff{Path: path} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to look up the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path []string) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - if reflect.DeepEqual(mod.Path, path) { - return mod - } - } - return nil -} - -// RootModule returns the ModuleDiff for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(rootModulePath) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. TODO: delete this when we get rid of the - // destroy flag on modules. - dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. -func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly.
- if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -func (d *Diff) init() { - if d.Modules == nil { - rootDiff := &ModuleDiff{Path: rootModulePath} - d.Modules = []*ModuleDiff{rootDiff} - } - for _, m := range d.Modules { - m.init() - } -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. -type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. -// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffUpdate for the module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff.
-func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name, _ := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key, _ := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "<computed>" - } - - if attrDiff.Sensitive { - u = "<sensitive>" - v = "<sensitive>" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - -// InstanceDiff is the diff of a resource from some state to another. -type InstanceDiff struct { - mu sync.Mutex - Attributes map[string]*ResourceAttrDiff - Destroy bool - DestroyDeposed bool - DestroyTainted bool - - // Meta is a simple K/V map that is stored in a diff and persisted to - // plans but otherwise is completely ignored by Terraform core. It is - // meant to be used for additional data a resource may want to pass through. - // The value here must only contain Go primitives and collections. - Meta map[string]interface{} -} - -func (d *InstanceDiff) Lock() { d.mu.Lock() } -func (d *InstanceDiff) Unlock() { d.mu.Unlock() } - -// ResourceAttrDiff is the diff of a single attribute of a resource. -type ResourceAttrDiff struct { - Old string // Old Value - New string // New Value - NewComputed bool // True if new value is computed (unknown currently) - NewRemoved bool // True if this attribute is being removed - NewExtra interface{} // Extra information for the provider - RequiresNew bool // True if change requires new resource - Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved -} - -func (d *ResourceAttrDiff) GoString() string { - return fmt.Sprintf("*%#v", *d) -} - -// DiffAttrType is an enum type that says whether a resource attribute -// diff is an input attribute (comes from the configuration) or an -// output attribute (comes as a result of applying the configuration). An -// example input would be "ami" for AWS and an example output would be -// "private_ip".
-type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} - -func NewInstanceDiff() *InstanceDiff { - return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} -} - -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff -// for this single instance. -func (d *InstanceDiff) ChangeType() DiffChangeType { - if d.Empty() { - return DiffNone - } - - if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate - } - - if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy - } - - if d.RequiresNew() { - return DiffCreate - } - - return DiffUpdate -} - -// Empty returns true if this diff encapsulates no changes. -func (d *InstanceDiff) Empty() bool { - if d == nil { - return true - } - - d.mu.Lock() - defer d.mu.Unlock() - return !d.Destroy && - !d.DestroyTainted && - !d.DestroyDeposed && - len(d.Attributes) == 0 -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Use DeepEqual - return reflect.DeepEqual(d, d2) -} - -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - -func (d *InstanceDiff) GoString() string { - return fmt.Sprintf("*%#v", InstanceDiff{ - Attributes: d.Attributes, - Destroy: d.Destroy, - DestroyTainted: d.DestroyTainted, - DestroyDeposed: d.DestroyDeposed, - }) -} - -// RequiresNew returns true if the diff requires the creation of a new -// resource (implying the destruction of the old). -func (d *InstanceDiff) RequiresNew() bool { - if d == nil { - return false - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.requiresNew() -} - -func (d *InstanceDiff) requiresNew() bool { - if d == nil { - return false - } - - if d.DestroyTainted { - return true - } - - for _, rd := range d.Attributes { - if rd != nil && rd.RequiresNew { - return true - } - } - - return false -} - -func (d *InstanceDiff) GetDestroyDeposed() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyDeposed -} - -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. 
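The locking remark above (continued just below) captures the convention the following accessors obey: exported methods such as GetDestroy and SetAttribute take d.mu themselves, while unexported helpers such as requiresNew assume the caller already holds the lock. A small self-contained sketch of that two-tier pattern, using an illustrative counter type rather than InstanceDiff:

package main

import (
	"fmt"
	"sync"
)

// counter demonstrates the InstanceDiff locking convention: exported
// methods lock, unexported helpers assume the lock is already held.
type counter struct {
	mu sync.Mutex
	n  int
}

// Add is safe for concurrent use: it takes the lock itself.
func (c *counter) Add(delta int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.add(delta)
}

// add must only be called with c.mu held, mirroring requiresNew above.
func (c *counter) add(delta int) { c.n += delta }

// Value locks and then reads, so callers never manage the mutex.
func (c *counter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.n
}

func main() {
	var c counter
	c.Add(2)
	fmt.Println(c.Value()) // 2
}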
-// TODO refactor the locking scheme -func (d *InstanceDiff) SetTainted(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyTainted = b -} - -func (d *InstanceDiff) GetDestroyTainted() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyTainted -} - -func (d *InstanceDiff) SetDestroy(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Destroy = b -} - -func (d *InstanceDiff) GetDestroy() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.Destroy -} - -func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Attributes[key] = attr -} - -func (d *InstanceDiff) DelAttribute(key string) { - d.mu.Lock() - defer d.mu.Unlock() - - delete(d.Attributes, key) -} - -func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { - d.mu.Lock() - defer d.mu.Unlock() - - attr, ok := d.Attributes[key] - return attr, ok -} -func (d *InstanceDiff) GetAttributesLen() int { - d.mu.Lock() - defer d.mu.Unlock() - - return len(d.Attributes) -} - -// Safely copies the Attributes map -func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { - d.mu.Lock() - defer d.mu.Unlock() - - attrs := make(map[string]*ResourceAttrDiff) - for k, v := range d.Attributes { - attrs[k] = v - } - - return attrs -} - -// Same checks whether or not two InstanceDiff's are the "same". When -// we say "same", it is not necessarily exactly equal. Instead, it is -// just checking that the same attributes are changing, a destroy -// isn't suddenly happening, etc. -func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { - // we can safely compare the pointers without a lock - switch { - case d == nil && d2 == nil: - return true, "" - case d == nil || d2 == nil: - return false, "one nil" - case d == d2: - return true, "" - } - - d.mu.Lock() - defer d.mu.Unlock() - - // If we're going from requiring new to NOT requiring new, then we have - // to see if all required news were computed. If so, it is allowed since - // computed may also mean "same value and therefore not new". - oldNew := d.requiresNew() - newNew := d2.RequiresNew() - if oldNew && !newNew { - oldNew = false - - // This section builds a list of ignorable attributes for requiresNew - // by removing off any elements of collections going to zero elements. - // For collections going to zero, they may not exist at all in the - // new diff (and hence RequiresNew == false). - ignoreAttrs := make(map[string]struct{}) - for k, diffOld := range d.Attributes { - if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { - continue - } - - // This case is in here as a protection measure. The bug that this - // code originally fixed (GH-11349) didn't have to deal with computed - // so I'm not 100% sure what the correct behavior is. Best to leave - // the old behavior. - if diffOld.NewComputed { - continue - } - - // We're looking for the case a map goes to exactly 0. - if diffOld.New != "0" { - continue - } - - // Found it! Ignore all of these. The prefix here is stripping - // off the "%" so it is just "k." - prefix := k[:len(k)-1] - for k2, _ := range d.Attributes { - if strings.HasPrefix(k2, prefix) { - ignoreAttrs[k2] = struct{}{} - } - } - } - - for k, rd := range d.Attributes { - if _, ok := ignoreAttrs[k]; ok { - continue - } - - // If the field is requires new and NOT computed, then what - // we have is a diff mismatch for sure. We set that the old - // diff does REQUIRE a ForceNew. 
- if rd != nil && rd.RequiresNew && !rd.NewComputed { - oldNew = true - break - } - } - } - - if oldNew != newNew { - return false, fmt.Sprintf( - "diff RequiresNew; old: %t, new: %t", oldNew, newNew) - } - - // Verify that destroy matches. The second boolean here allows us to - // have mismatching Destroy if we're moving from RequiresNew true - // to false above. Therefore, the second boolean will only pass if - // we're moving from Destroy: true to false as well. - if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { - return false, fmt.Sprintf( - "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) - } - - // Go through the old diff and make sure the new diff has all the - // same attributes. To start, build up the check map to be all the keys. - checkOld := make(map[string]struct{}) - checkNew := make(map[string]struct{}) - for k, _ := range d.Attributes { - checkOld[k] = struct{}{} - } - for k, _ := range d2.CopyAttributes() { - checkNew[k] = struct{}{} - } - - // Make an ordered list so we are sure the approximated hashes are left - // to process at the end of the loop - keys := make([]string, 0, len(d.Attributes)) - for k, _ := range d.Attributes { - keys = append(keys, k) - } - sort.StringSlice(keys).Sort() - - for _, k := range keys { - diffOld := d.Attributes[k] - - if _, ok := checkOld[k]; !ok { - // We're not checking this key for whatever reason (see where - // check is modified). - continue - } - - // Remove this key since we'll never hit it again - delete(checkOld, k) - delete(checkNew, k) - - _, ok := d2.GetAttribute(k) - if !ok { - // If there's no new attribute, and the old diff expected the attribute - // to be removed, that's just fine. - if diffOld.NewRemoved { - continue - } - - // If the last diff was a computed value then the absence of - // that value is allowed since it may mean the value ended up - // being the same. - if diffOld.NewComputed { - ok = true - } - - // No exact match, but maybe this is a set containing computed - // values. So check if there is an approximate hash in the key - // and if so, try to match the key. - if strings.Contains(k, "~") { - parts := strings.Split(k, ".") - parts2 := append([]string(nil), parts...) - - re := regexp.MustCompile(`^~\d+$`) - for i, part := range parts { - if re.MatchString(part) { - // we're going to consider this the base of a - // computed hash, and remove all longer matching fields - ok = true - - parts2[i] = `\d+` - parts2 = parts2[:i+1] - break - } - } - - re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) - if err != nil { - return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) - } - - for k2, _ := range checkNew { - if re.MatchString(k2) { - delete(checkNew, k2) - } - } - } - - // This is a little tricky, but when a diff contains a computed - // list, set, or map that can only be interpolated after the apply - // command has created the dependent resources, it could turn out - // that the result is actually the same as the existing state which - // would remove the key from the diff. - if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - // Similarly, in a RequiresNew scenario, a list that shows up in the plan - // diff can disappear from the apply diff, which is calculated from an - // empty state.
- if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - if !ok { - return false, fmt.Sprintf("attribute mismatch: %s", k) - } - } - - // search for the suffix of the base of a [computed] map, list or set. - match := multiVal.FindStringSubmatch(k) - - if diffOld.NewComputed && len(match) == 2 { - matchLen := len(match[1]) - - // This is a computed list, set, or map, so remove any keys with - // this prefix from the check list. - kprefix := k[:len(k)-matchLen] - for k2, _ := range checkOld { - if strings.HasPrefix(k2, kprefix) { - delete(checkOld, k2) - } - } - for k2, _ := range checkNew { - if strings.HasPrefix(k2, kprefix) { - delete(checkNew, k2) - } - } - } - - // TODO: check for the same value if not computed - } - - // Check for leftover attributes - if len(checkNew) > 0 { - extras := make([]string, 0, len(checkNew)) - for attr, _ := range checkNew { - extras = append(extras, attr) - } - return false, - fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) - } - - return true, "" -} - -// moduleDiffSort implements sort.Interface to sort module diffs by path. -type moduleDiffSort []*ModuleDiff - -func (s moduleDiffSort) Len() int { return len(s) } -func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s moduleDiffSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go deleted file mode 100644 index bc9d638..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// DestroyEdge is an edge that represents a standard "destroy" relationship: -// Target depends on Source because Source is destroying. -type DestroyEdge struct { - S, T dag.Vertex -} - -func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) } -func (e *DestroyEdge) Source() dag.Vertex { return e.S } -func (e *DestroyEdge) Target() dag.Vertex { return e.T } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go deleted file mode 100644 index 10d9c22..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval.go +++ /dev/null @@ -1,63 +0,0 @@ -package terraform - -import ( - "log" - "strings" -) - -// EvalNode is the interface that must be implemented by graph nodes to -// evaluate/execute. -type EvalNode interface { - // Eval evaluates this node with the given context. - Eval(EvalContext) (interface{}, error) -} - -// GraphNodeEvalable is the interface that graph nodes must implement -// to enable evaluation. -type GraphNodeEvalable interface { - EvalTree() EvalNode -} - -// EvalEarlyExitError is a special error return value that can be returned -// by eval nodes to signal an early exit.
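The Eval/EvalRaw pair defined just below splits error reporting in two: EvalRaw surfaces every error, while Eval treats the early-exit sentinel as a normal stop. A compact self-contained sketch of that sentinel-error pattern, with local stand-ins for the EvalNode machinery:

package main

import "fmt"

// earlyExit is a sentinel error type, like EvalEarlyExitError below:
// it signals "stop walking" rather than a real failure.
type earlyExit struct{}

func (earlyExit) Error() string { return "early exit" }

type node interface {
	Eval() (interface{}, error)
}

type haltingNode struct{}

func (haltingNode) Eval() (interface{}, error) { return nil, earlyExit{} }

// evalRaw returns every error, including the sentinel.
func evalRaw(n node) (interface{}, error) { return n.Eval() }

// eval swallows the sentinel so callers see a clean, successful stop.
func eval(n node) (interface{}, error) {
	result, err := evalRaw(n)
	if err != nil {
		if _, ok := err.(earlyExit); ok {
			return nil, nil
		}
	}
	return result, err
}

func main() {
	_, err := eval(haltingNode{})
	fmt.Println(err) // <nil>: the early exit is not surfaced as an error
}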
-type EvalEarlyExitError struct{} - -func (EvalEarlyExitError) Error() string { return "early exit" } - -// Eval evaluates the given EvalNode with the given context, properly -// evaluating all args in the correct order. -func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { - // Call the lower level eval which doesn't understand early exit, - // and if we early exit, it isn't an error. - result, err := EvalRaw(n, ctx) - if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - return nil, nil - } - } - - return result, err -} - -// EvalRaw is like Eval except that it returns all errors, even if they -// signal something normal such as EvalEarlyExitError. -func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { - path := "unknown" - if ctx != nil { - path = strings.Join(ctx.Path(), ".") - } - - log.Printf("[TRACE] %s: eval: %T", path, n) - output, err := n.Eval(ctx) - if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err) - } else { - log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) - } - } - - return output, err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go deleted file mode 100644 index 2f6a497..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go +++ /dev/null @@ -1,359 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" -) - -// EvalApply is an EvalNode implementation that applies a diff to -// produce a new state. -type EvalApply struct { - Info *InstanceInfo - State **InstanceState - Diff **InstanceDiff - Provider *ResourceProvider - Output **InstanceState - CreateNew *bool - Error *error -} - -// TODO: test -func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - diff := *n.Diff - provider := *n.Provider - state := *n.State - - // If we have no diff, we have nothing to do! - if diff.Empty() { - log.Printf( - "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id) - return nil, nil - } - - // Remove any output values from the diff - for k, ad := range diff.CopyAttributes() { - if ad.Type == DiffAttrOutput { - diff.DelAttribute(k) - } - } - - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) - } - state.init() - - // Flag if we're creating a new instance - if n.CreateNew != nil { - *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew() - } - - // With the completed diff, apply! - log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id) - state, err := provider.Apply(n.Info, state, diff) - if state == nil { - state = new(InstanceState) - } - state.init() - - // Force the "id" attribute to be our ID - if state.ID != "" { - state.Attributes["id"] = state.ID - } - - // If the value is the unknown variable value, then it is an error. - // In this case we record the error and remove it from the state - for ak, av := range state.Attributes { - if av == config.UnknownVariableValue { - err = multierror.Append(err, fmt.Errorf( - "Attribute with unknown value: %s", ak)) - delete(state.Attributes, ak) - } - } - - // Write the final state - if n.Output != nil { - *n.Output = state - } - - // If there was an error, we append it to our output error - // if we have one; otherwise we just return it.
- if err != nil { - if n.Error != nil { - helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error()) - *n.Error = multierror.Append(*n.Error, helpfulErr) - } else { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Info *InstanceInfo - State **InstanceState - Diff **InstanceDiff -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - diff := *n.Diff - - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) - } - state.init() - - { - // Call pre-apply hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(n.Info, state, diff) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Info *InstanceInfo - State **InstanceState - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - { - // Call post-apply hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Info, state, *n.Error) - }) - if err != nil { - return nil, err - } - } - - return nil, *n.Error -} - -// EvalApplyProvisioners is an EvalNode implementation that executes -// the provisioners for a resource. -// -// TODO(mitchellh): This should probably be split up into a more fine-grained -// ApplyProvisioner (single) that is looped over. -type EvalApplyProvisioners struct { - Info *InstanceInfo - State **InstanceState - Resource *config.Resource - InterpResource *Resource - CreateNew *bool - Error *error - - // When is the type of provisioner to run at this point - When config.ProvisionerWhen -} - -// TODO: test -func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if n.CreateNew != nil && !*n.CreateNew { - // If we're not creating a new resource, then don't run provisioners - return nil, nil - } - - provs := n.filterProvisioners() - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil, nil - } - - // taint tells us whether to enable tainting. - taint := n.When == config.ProvisionerWhenCreate - - if n.Error != nil && *n.Error != nil { - if taint { - state.Tainted = true - } - - // We're already tainted, so just return out - return nil, nil - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionResource(n.Info, state) - }) - if err != nil { - return nil, err - } - } - - // Run the provisioners. If there was an error, we append it to our - // output error if we have one; otherwise we just return it. - err := n.apply(ctx, provs) - if err != nil { - if taint { - state.Tainted = true - } - - if n.Error != nil { - *n.Error = multierror.Append(*n.Error, err) - } else { - return nil, err - } - } - - { - // Call post hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionResource(n.Info, state) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option.
-func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { - // Fast path the zero case - if n.Resource == nil { - return nil - } - - if len(n.Resource.Provisioners) == 0 { - return nil - } - - result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) - for _, p := range n.Resource.Provisioners { - if p.When == n.When { - result = append(result, p) - } - } - - return result -} - -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { - state := *n.State - - // Store the original connection info, restore later - origConnInfo := state.Ephemeral.ConnInfo - defer func() { - state.Ephemeral.ConnInfo = origConnInfo - }() - - for _, prov := range provs { - // Get the provisioner - provisioner := ctx.Provisioner(prov.Type) - - // Interpolate the provisioner config - provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource) - if err != nil { - return err - } - - // Interpolate the conn info, since it may contain variables - connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) - if err != nil { - return err - } - - // Merge the connection information - overlay := make(map[string]string) - if origConnInfo != nil { - for k, v := range origConnInfo { - overlay[k] = v - } - } - for k, v := range connInfo.Config { - switch vt := v.(type) { - case string: - overlay[k] = vt - case int64: - overlay[k] = strconv.FormatInt(vt, 10) - case int32: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case int: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case float32: - overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) - case float64: - overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64) - case bool: - overlay[k] = strconv.FormatBool(vt) - default: - overlay[k] = fmt.Sprintf("%v", vt) - } - } - state.Ephemeral.ConnInfo = overlay - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvision(n.Info, prov.Type) - }) - if err != nil { - return err - } - } - - // The output function - outputFn := func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(n.Info, prov.Type, msg) - return HookActionContinue, nil - }) - } - - // Invoke the Provisioner - output := CallbackUIOutput{OutputFn: outputFn} - applyErr := provisioner.Apply(&output, state, provConfig) - - // Call post hook - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvision(n.Info, prov.Type, applyErr) - }) - - // Handle the error before we deal with the hook - if applyErr != nil { - // Determine failure behavior - switch prov.OnFailure { - case config.ProvisionerOnFailureContinue: - log.Printf( - "[INFO] apply: %s [%s]: error during provision, continue requested", - n.Info.Id, prov.Type) - - case config.ProvisionerOnFailureFail: - return applyErr - } - } - - // Deal with the hook - if hookErr != nil { - return hookErr - } - } - - return nil - -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go deleted file mode 100644 index 715e79e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// EvalPreventDestroy is an EvalNode implementation that returns an -// error if a resource has PreventDestroy configured and the diff -// would destroy the resource. 
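The connection-info merge in EvalApplyProvisioners.apply above flattens interpolated values into a map[string]string with an explicit type switch per scalar type. A self-contained sketch of just that coercion step (the input map is invented for illustration):

package main

import (
	"fmt"
	"strconv"
)

// coerce mirrors the switch in EvalApplyProvisioners.apply: every
// supported scalar type gets an explicit, loss-controlled conversion,
// and anything unrecognized falls back to fmt.Sprintf.
func coerce(in map[string]interface{}) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		switch vt := v.(type) {
		case string:
			out[k] = vt
		case int64:
			out[k] = strconv.FormatInt(vt, 10)
		case int:
			out[k] = strconv.FormatInt(int64(vt), 10)
		case float64:
			out[k] = strconv.FormatFloat(vt, 'f', 3, 64)
		case bool:
			out[k] = strconv.FormatBool(vt)
		default:
			out[k] = fmt.Sprintf("%v", vt)
		}
	}
	return out
}

func main() {
	fmt.Println(coerce(map[string]interface{}{
		"host": "10.0.0.5", "port": 22, "agent": true, "timeout": 1.5,
	}))
}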
-type EvalCheckPreventDestroy struct { - Resource *config.Resource - ResourceId string - Diff **InstanceDiff -} - -func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Diff == nil || *n.Diff == nil || n.Resource == nil { - return nil, nil - } - - diff := *n.Diff - preventDestroy := n.Resource.Lifecycle.PreventDestroy - - if diff.GetDestroy() && preventDestroy { - resourceId := n.ResourceId - if resourceId == "" { - resourceId = n.Resource.Id() - } - - return nil, fmt.Errorf(preventDestroyErrStr, resourceId) - } - - return nil, nil -} - -const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.` diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go deleted file mode 100644 index a1f815b..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go +++ /dev/null @@ -1,84 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/config" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() []string - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. - Input() UIInput - - // InitProvider initializes the provider with the given name and - // returns the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. - InitProvider(string) (ResourceProvider, error) - - // Provider gets the provider instance with the given name (already - // initialized) or returns nil if the provider isn't initialized. - Provider(string) ResourceProvider - - // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(string) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - ConfigureProvider(string, *ResourceConfig) error - SetProviderConfig(string, *ResourceConfig) error - ParentProviderConfig(string) *ResourceConfig - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. - ProviderInput(string) map[string]interface{} - SetProviderInput(string, map[string]interface{}) - - // InitProvisioner initializes the provisioner with the given name and - // returns the implementation of the resource provisioner or an error. - // - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (ResourceProvisioner, error) - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) ResourceProvisioner - - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. 
- CloseProvisioner(string) error - - // Interpolate takes the given raw configuration and completes - // the interpolations, returning the processed ResourceConfig. - // - // The resource argument is optional. If given, it is the resource - // that is currently being acted upon. - Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) - - // SetVariables sets the variables for the module within - // this context with the name n. This function call is additive: - // the second parameter is merged with any previous call. - SetVariables(string, map[string]interface{}) - - // Diff returns the global diff as well as the lock that should - // be used to modify that diff. - Diff() (*Diff, *sync.RWMutex) - - // State returns the global state as well as the lock that should - // be used to modify that state. - State() (*State, *sync.RWMutex) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go deleted file mode 100644 index 3dcfb22..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ /dev/null @@ -1,347 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "strings" - "sync" - - "github.com/hashicorp/terraform/config" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. -type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue []string - - // The Interpolater settings below affect the interpolation of variables. - // - // The InterpolaterVars are the exact values for ${var.foo} values. - // The map is shared between all contexts and is a mapping of - // PATH to KEY to VALUE. Because it is shared by all contexts as well - // as the Interpolater itself, it is protected by InterpolaterVarLock - // which must be locked during any access to the map. - Interpolater *Interpolater - InterpolaterVars map[string]map[string]interface{} - InterpolaterVarLock *sync.Mutex - - Components contextComponentFactory - Hooks []Hook - InputValue UIInput - ProviderCache map[string]ResourceProvider - ProviderConfigCache map[string]*ResourceConfig - ProviderInputConfig map[string]map[string]interface{} - ProviderLock *sync.Mutex - ProvisionerCache map[string]ResourceProvisioner - ProvisionerLock *sync.Mutex - DiffValue *Diff - DiffLock *sync.RWMutex - StateValue *State - StateLock *sync.RWMutex - - once sync.Once -} - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever.
- if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { - ctx.once.Do(ctx.init) - - // If we already initialized, it is an error - if p := ctx.Provider(n); p != nil { - return nil, fmt.Errorf("Provider '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - key := PathCacheKey(providerPath) - - typeName := strings.SplitN(n, ".", 2)[0] - p, err := ctx.Components.ResourceProvider(typeName, key) - if err != nil { - return nil, err - } - - ctx.ProviderCache[key] = p - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - return ctx.ProviderCache[PathCacheKey(providerPath)] -} - -func (ctx *BuiltinEvalContext) CloseProvider(n string) error { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - var provider interface{} - provider = ctx.ProviderCache[PathCacheKey(providerPath)] - if provider != nil { - if p, ok := provider.(ResourceProviderCloser); ok { - delete(ctx.ProviderCache, PathCacheKey(providerPath)) - return p.Close() - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider( - n string, cfg *ResourceConfig) error { - p := ctx.Provider(n) - if p == nil { - return fmt.Errorf("Provider '%s' not initialized", n) - } - - if err := ctx.SetProviderConfig(n, cfg); err != nil { - return err - } - - return p.Configure(cfg) -} - -func (ctx *BuiltinEvalContext) SetProviderConfig( - n string, cfg *ResourceConfig) error { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg - ctx.ProviderLock.Unlock() - - return nil -} - -func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree.
- for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderInputConfig[k]; ok { - return v - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree. - for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderConfigCache[k]; ok { - return v - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) InitProvisioner( - n string) (ResourceProvisioner, error) { - ctx.once.Do(ctx.init) - - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return nil, fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n - key := PathCacheKey(provPath) - - p, err := ctx.Components.ResourceProvisioner(n, key) - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[key] = p - return p, nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n - - return ctx.ProvisionerCache[PathCacheKey(provPath)] -} - -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n - - var prov interface{} - prov = ctx.ProvisionerCache[PathCacheKey(provPath)] - if prov != nil { - if p, ok := prov.(ResourceProvisionerCloser); ok { - delete(ctx.ProvisionerCache, PathCacheKey(provPath)) - return p.Close() - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Interpolate( - cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { - if cfg != nil { - scope := &InterpolationScope{ - Path: ctx.Path(), - Resource: r, - } - - vs, err := ctx.Interpolater.Values(scope, cfg.Variables) - if err != nil { - return nil, err - } - - // Do the interpolation - if err := cfg.Interpolate(vs); err != nil { - return nil, err - } - } - - result := NewResourceConfig(cfg) - result.interpolateForce() - return result, nil -} - -func (ctx *BuiltinEvalContext) Path() []string { - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { - ctx.InterpolaterVarLock.Lock() - defer ctx.InterpolaterVarLock.Unlock() - - path := make([]string, len(ctx.Path())+1) - copy(path, ctx.Path()) - path[len(path)-1] = n - key := PathCacheKey(path) - - vars := ctx.InterpolaterVars[key] - if vars == 
nil { - vars = make(map[string]interface{}) - ctx.InterpolaterVars[key] = vars - } - - for k, v := range vs { - vars[k] = v - } -} - -func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { - return ctx.DiffValue, ctx.DiffLock -} - -func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { - return ctx.StateValue, ctx.StateLock -} - -func (ctx *BuiltinEvalContext) init() { -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go deleted file mode 100644 index 4f90d5b..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ /dev/null @@ -1,208 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/config" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderName string - InitProviderProvider ResourceProvider - InitProviderError error - - ProviderCalled bool - ProviderName string - ProviderProvider ResourceProvider - - CloseProviderCalled bool - CloseProviderName string - CloseProviderProvider ResourceProvider - - ProviderInputCalled bool - ProviderInputName string - ProviderInputConfig map[string]interface{} - - SetProviderInputCalled bool - SetProviderInputName string - SetProviderInputConfig map[string]interface{} - - ConfigureProviderCalled bool - ConfigureProviderName string - ConfigureProviderConfig *ResourceConfig - ConfigureProviderError error - - SetProviderConfigCalled bool - SetProviderConfigName string - SetProviderConfigConfig *ResourceConfig - - ParentProviderConfigCalled bool - ParentProviderConfigName string - ParentProviderConfigConfig *ResourceConfig - - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner ResourceProvisioner - InitProvisionerError error - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner ResourceProvisioner - - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner ResourceProvisioner - - InterpolateCalled bool - InterpolateConfig *config.RawConfig - InterpolateResource *Resource - InterpolateConfigResult *ResourceConfig - InterpolateError error - - PathCalled bool - PathPath []string - - SetVariablesCalled bool - SetVariablesModule string - SetVariablesVariables map[string]interface{} - - DiffCalled bool - DiffDiff *Diff - DiffLock *sync.RWMutex - - StateCalled bool - StateState *State - StateLock *sync.RWMutex -} - -func (c *MockEvalContext) Stopped() <-chan struct{} { - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { - c.InitProviderCalled = true - c.InitProviderName = n - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(n string) ResourceProvider { - c.ProviderCalled = true - c.ProviderName = n - return c.ProviderProvider -} - -func (c *MockEvalContext) CloseProvider(n string) error { - c.CloseProviderCalled = true 
- c.CloseProviderName = n - return nil -} - -func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { - c.ConfigureProviderCalled = true - c.ConfigureProviderName = n - c.ConfigureProviderConfig = cfg - return c.ConfigureProviderError -} - -func (c *MockEvalContext) SetProviderConfig( - n string, cfg *ResourceConfig) error { - c.SetProviderConfigCalled = true - c.SetProviderConfigName = n - c.SetProviderConfigConfig = cfg - return nil -} - -func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig { - c.ParentProviderConfigCalled = true - c.ParentProviderConfigName = n - return c.ParentProviderConfigConfig -} - -func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { - c.ProviderInputCalled = true - c.ProviderInputName = n - return c.ProviderInputConfig -} - -func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { - c.SetProviderInputCalled = true - c.SetProviderInputName = n - c.SetProviderInputConfig = cfg -} - -func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerProvisioner, c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner -} - -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n - return nil -} - -func (c *MockEvalContext) Interpolate( - config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { - c.InterpolateCalled = true - c.InterpolateConfig = config - c.InterpolateResource = resource - return c.InterpolateConfigResult, c.InterpolateError -} - -func (c *MockEvalContext) Path() []string { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { - c.SetVariablesCalled = true - c.SetVariablesModule = n - c.SetVariablesVariables = vs -} - -func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { - c.DiffCalled = true - return c.DiffDiff, c.DiffLock -} - -func (c *MockEvalContext) State() (*State, *sync.RWMutex) { - c.StateCalled = true - return c.StateState, c.StateLock -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go deleted file mode 100644 index 2ae56a7..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go +++ /dev/null @@ -1,58 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config" -) - -// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -type EvalCountFixZeroOneBoundary struct { - Resource *config.Resource -} - -// TODO: test -func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { - // Get the count, important for knowing whether we're supposed to - // be adding the zero, or trimming it. - count, err := n.Resource.Count() - if err != nil { - return nil, err - } - - // Figure what to look for and what to replace it with - hunt := n.Resource.Id() - replace := hunt + ".0" - if count < 2 { - hunt, replace = replace, hunt - } - - state, lock := ctx.State() - - // Get a lock so we can access this instance and potentially make - // changes to it. 
- lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. - rs, ok := mod.Resources[hunt] - if !ok { - return nil, nil - } - - // If the replacement key exists, we just keep both - if _, ok := mod.Resources[replace]; ok { - return nil, nil - } - - mod.Resources[replace] = rs - delete(mod.Resources, hunt) - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go deleted file mode 100644 index 91e2b90..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go +++ /dev/null @@ -1,78 +0,0 @@ -package terraform - -import ( - "log" -) - -// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -// -// This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct{} - -// TODO: test -func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // Get the state and lock it since we'll potentially modify it - state, lock := ctx.State() - lock.Lock() - defer lock.Unlock() - - // Prune the state since we require a clean state to work - state.prune() - - // Go through each module, since the boundaries are restricted to a - // module scope. - for _, m := range state.Modules { - if err := n.fixModule(m); err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { - // Counts keeps track of keys and their counts - counts := make(map[string]int) - for k, _ := range m.Resources { - // Parse the key - key, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Set the index to -1 so that we can keep count - key.Index = -1 - - // Increment - counts[key.String()]++ - } - - // Go through the counts and do the fixup for each resource - for raw, count := range counts { - // Search and replace this resource - search := raw - replace := raw + ".0" - if count < 2 { - search, replace = replace, search - } - log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace) - - // Look for the resource state. If we don't have one, then it is okay. - rs, ok := m.Resources[search] - if !ok { - continue - } - - // If the replacement key exists, we just keep both - if _, ok := m.Resources[replace]; ok { - continue - } - - m.Resources[replace] = rs - delete(m.Resources, search) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go deleted file mode 100644 index 54a8333..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// EvalCountCheckComputed is an EvalNode that checks if a resource count -// is computed and errors if so. This can possibly happen across a -// module boundary and we don't yet support this.
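The fixup in fixModule above reduces to a keyed rename: depending on the count, either "name" moves to "name.0" or the reverse, and the move is skipped when the source key is absent or the target already exists. A stand-alone sketch of that rename step, using a plain map in place of ModuleState:

package main

import "fmt"

// fixBoundary renames a resource key across the zero/one count boundary,
// mirroring the search/replace swap in fixModule above. count < 2 means
// the indexed form "name.0" should collapse back to the bare "name".
func fixBoundary(resources map[string]string, name string, count int) {
	search, replace := name, name+".0"
	if count < 2 {
		search, replace = replace, search
	}
	rs, ok := resources[search]
	if !ok {
		return // nothing stored under the old key
	}
	if _, ok := resources[replace]; ok {
		return // target already exists; keep both, as the original does
	}
	resources[replace] = rs
	delete(resources, search)
}

func main() {
	m := map[string]string{"aws_instance.foo.0": "i-abc123"}
	fixBoundary(m, "aws_instance.foo", 1) // count dropped back to one
	fmt.Println(m)                        // map[aws_instance.foo:i-abc123]
}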
-type EvalCountCheckComputed struct { - Resource *config.Resource -} - -// TODO: test -func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) { - if n.Resource.RawCount.Value() == unknownValue() { - return nil, fmt.Errorf( - "%s: value of 'count' cannot be computed", - n.Resource.Id()) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go deleted file mode 100644 index c35f908..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ /dev/null @@ -1,490 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/config" -) - -// EvalCompareDiff is an EvalNode implementation that compares two diffs -// and errors if the diffs are not equal. -type EvalCompareDiff struct { - Info *InstanceInfo - One, Two **InstanceDiff -} - -// TODO: test -func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { - one, two := *n.One, *n.Two - - // If either are nil, let them be empty - if one == nil { - one = new(InstanceDiff) - one.init() - } - if two == nil { - two = new(InstanceDiff) - two.init() - } - oneId, _ := one.GetAttribute("id") - twoId, _ := two.GetAttribute("id") - one.DelAttribute("id") - two.DelAttribute("id") - defer func() { - if oneId != nil { - one.SetAttribute("id", oneId) - } - if twoId != nil { - two.SetAttribute("id", twoId) - } - }() - - if same, reason := one.Same(two); !same { - log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id) - log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason) - log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one) - log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two) - return nil, fmt.Errorf( - "%s: diffs didn't match during apply. This is a bug with "+ - "Terraform and should be reported as a GitHub Issue.\n"+ - "\n"+ - "Please include the following information in your report:\n"+ - "\n"+ - " Terraform Version: %s\n"+ - " Resource ID: %s\n"+ - " Mismatch reason: %s\n"+ - " Diff One (usually from plan): %#v\n"+ - " Diff Two (usually from apply): %#v\n"+ - "\n"+ - "Also include as much context as you can about your config, state, "+ - "and the steps you performed to trigger this error.\n", - n.Info.Id, Version, n.Info.Id, reason, one, two) - } - - return nil, nil -} - -// EvalDiff is an EvalNode implementation that computes a diff for -// a resource. -type EvalDiff struct { - Name string - Info *InstanceInfo - Config **ResourceConfig - Provider *ResourceProvider - Diff **InstanceDiff - State **InstanceState - OutputDiff **InstanceDiff - OutputState **InstanceState - - // Resource is needed to fetch the ignore_changes list so we can - // filter user-requested ignored attributes from the diff. - Resource *config.Resource - - // Stub is used to flag the generated InstanceDiff as a stub. This is used to - // ensure that the node exists to perform interpolations and generate - // computed paths off of, but not as an actual diff where resources should be - // counted, and not as a diff that should be acted on.
- Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) - }) - if err != nil { - return nil, err - } - } - - // The state for the diff must never be nil - diffState := state - if diffState == nil { - diffState = new(InstanceState) - } - diffState.init() - - // Diff! - diff, err := provider.Diff(n.Info, diffState, config) - if err != nil { - return nil, err - } - if diff == nil { - diff = new(InstanceDiff) - } - - // Set DestroyDeposed if we have deposed instances - _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) { - if len(rs.Deposed) > 0 { - diff.DestroyDeposed = true - } - - return nil, nil - }) - if err != nil { - return nil, err - } - - // Preserve the DestroyTainted flag - if n.Diff != nil { - diff.SetTainted((*n.Diff).GetDestroyTainted()) - } - - // Require a destroy if there is an ID and it requires new. - if diff.RequiresNew() && state != nil && state.ID != "" { - diff.SetDestroy(true) - } - - // If we're creating a new resource, compute its ID - if diff.RequiresNew() || state == nil || state.ID == "" { - var oldID string - if state != nil { - oldID = state.Attributes["id"] - } - - // Add diff to compute new ID - diff.init() - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: oldID, - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, - }) - } - - // filter out ignored resources - if err := n.processIgnoreChanges(diff); err != nil { - return nil, err - } - - // Call post-diff hook - if !n.Stub { - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) - }) - if err != nil { - return nil, err - } - } - - // Update our output if we care - if n.OutputDiff != nil { - *n.OutputDiff = diff - } - - // Update the state if we care - if n.OutputState != nil { - *n.OutputState = state - - // Merge our state so that the state is updated with our plan - if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) - } - } - - return nil, nil -} - -func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { - if diff == nil || n.Resource == nil || n.Resource.Id() == "" { - return nil - } - ignoreChanges := n.Resource.Lifecycle.IgnoreChanges - - if len(ignoreChanges) == 0 { - return nil - } - - // If we're just creating the resource, we shouldn't alter the - // Diff at all - if diff.ChangeType() == DiffCreate { - return nil - } - - // If the resource has been tainted then we don't process ignore changes - // since we MUST recreate the entire resource. - if diff.GetDestroyTainted() { - return nil - } - - attrs := diff.CopyAttributes() - - // get the complete set of keys we want to ignore - ignorableAttrKeys := make(map[string]bool) - for _, ignoredKey := range ignoreChanges { - for k := range attrs { - if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) { - ignorableAttrKeys[k] = true - } - } - } - - // If the resource was being destroyed, check to see if we can ignore the - // reason for it being destroyed. - if diff.GetDestroy() { - for k, v := range attrs { - if k == "id" { - // id will always be changed if we intended to replace this instance - continue - } - if v.Empty() || v.NewComputed { - continue - } - - // If any RequiresNew attribute isn't ignored, we need to keep the diff - // as-is to be able to replace the resource.
- if v.RequiresNew && !ignorableAttrKeys[k] { - return nil - } - } - - // Now that we know that we aren't replacing the instance, we can filter - // out all the empty and computed attributes. There may be a bunch of - // extraneous attribute diffs for the other non-requires-new attributes - // going from "" -> "configval" or "" -> "". - // We must make sure any flatmapped containers are filtered (or not) as a - // whole. - containers := groupContainers(diff) - keep := map[string]bool{} - for _, v := range containers { - if v.keepDiff() { - // At least one key has changes, so list all the sibling keys - // to keep in the diff. - for k := range v { - keep[k] = true - } - } - } - - for k, v := range attrs { - if (v.Empty() || v.NewComputed) && !keep[k] { - ignorableAttrKeys[k] = true - } - } - } - - // Here we undo the two reactions to RequireNew in EvalDiff - the "id" - // attribute diff and the Destroy boolean field - log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + - "because after ignore_changes, this diff no longer requires replacement") - diff.DelAttribute("id") - diff.SetDestroy(false) - - // If we didn't hit any of our early exit conditions, we can filter the diff. - for k := range ignorableAttrKeys { - log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", - n.Resource.Id(), k) - diff.DelAttribute(k) - } - - return nil -} - -// a group of key-*ResourceAttrDiff pairs from the same flatmapped container -type flatAttrDiff map[string]*ResourceAttrDiff - -// we need to keep all keys if any of them have a diff -func (f flatAttrDiff) keepDiff() bool { - for _, v := range f { - if !v.Empty() && !v.NewComputed { - return true - } - } - return false -} - -// sets, lists and maps need to be compared for diff inclusion as a whole, so -// group the flatmapped keys together for easier comparison. -func groupContainers(d *InstanceDiff) map[string]flatAttrDiff { - isIndex := multiVal.MatchString - containers := map[string]flatAttrDiff{} - attrs := d.CopyAttributes() - // we need to loop once to find the index key - for k := range attrs { - if isIndex(k) { - // add the key, always including the final dot to fully qualify it - containers[k[:len(k)-1]] = flatAttrDiff{} - } - } - - // loop again to find all the sub keys - for prefix, values := range containers { - for k, attrDiff := range attrs { - // we include the index value as well, since it could be part of the diff - if strings.HasPrefix(k, prefix) { - values[k] = attrDiff - } - } - } - - return containers -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. -type EvalDiffDestroy struct { - Info *InstanceInfo - State **InstanceState - Output **InstanceDiff -} - -// TODO: test -func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - // If there is no state or we don't have an ID, we're already destroyed - if state == nil || state.ID == "" { - return nil, nil - } - - // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) - }) - if err != nil { - return nil, err - } - - // The diff - diff := &InstanceDiff{Destroy: true} - - // Call post-diff hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) - }) - if err != nil { - return nil, err - } - - // Update our output - *n.Output = diff - - return nil, nil -} - -// EvalDiffDestroyModule is an EvalNode implementation that marks a module as -// destroyed in the full diff.
-type EvalDiffDestroyModule struct { - Path []string -} - -// TODO: test -func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() - - // Write the diff - modDiff := diff.ModuleByPath(n.Path) - if modDiff == nil { - modDiff = diff.AddModule(n.Path) - } - modDiff.Destroy = true - - return nil, nil -} - -// EvalFilterDiff is an EvalNode implementation that filters the diff -// according to some filter. -type EvalFilterDiff struct { - // Input and output - Diff **InstanceDiff - Output **InstanceDiff - - // Destroy, if true, will only include a destroy diff if it is set. - Destroy bool -} - -func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { - if *n.Diff == nil { - return nil, nil - } - - input := *n.Diff - result := new(InstanceDiff) - - if n.Destroy { - if input.GetDestroy() || input.RequiresNew() { - result.SetDestroy(true) - } - } - - if n.Output != nil { - *n.Output = result - } - - return nil, nil -} - -// EvalReadDiff is an EvalNode implementation that reads the resource's diff -// out of the full diff. -type EvalReadDiff struct { - Name string - Diff **InstanceDiff -} - -func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() - - // Read the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { - return nil, nil - } - - *n.Diff = modDiff.Resources[n.Name] - - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that writes the diff to -// the full diff. -type EvalWriteDiff struct { - Name string - Diff **InstanceDiff -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // The diff to write; if it's empty we write nil - var diffVal *InstanceDiff - if n.Diff != nil { - diffVal = *n.Diff - } - if diffVal.Empty() { - diffVal = nil - } - - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() - - // Write the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { - modDiff = diff.AddModule(ctx.Path()) - } - if diffVal != nil { - modDiff.Resources[n.Name] = diffVal - } else { - delete(modDiff.Resources, n.Name) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go deleted file mode 100644 index 470f798..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// EvalReturnError is an EvalNode implementation that returns an -// error if it is present. -// -// This is useful for scenarios where an error has been captured by -// another EvalNode (like EvalApply) for special EvalTree-based error -// handling, and that handling has completed, so the error should be -// returned normally.
-type EvalReturnError struct { - Error *error -} - -func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) { - if n.Error == nil { - return nil, nil - } - - return nil, *n.Error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go deleted file mode 100644 index 711c625..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -// EvalNodeFilterFunc is the callback used to replace a node with -// another node. To skip the replacement, just return the input node. -type EvalNodeFilterFunc func(EvalNode) EvalNode - -// EvalNodeFilterable is an interface that can be implemented by -// EvalNodes to allow filtering of sub-elements. Note that this isn't -// a common thing to implement and you probably don't need it. -type EvalNodeFilterable interface { - EvalNode - Filter(EvalNodeFilterFunc) -} - -// EvalFilter runs the filter on the given node and returns the -// final filtered value. This should be called rather than checking -// the EvalNode directly since this will properly handle EvalNodeFilterables. -func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode { - if f, ok := node.(EvalNodeFilterable); ok { - f.Filter(fn) - return node - } - - return fn(node) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go deleted file mode 100644 index 1a55f02..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go +++ /dev/null @@ -1,49 +0,0 @@ -package terraform - -// EvalNodeOpFilterable is an interface that EvalNodes can implement -// to be filterable by the operation that is being run on Terraform. -type EvalNodeOpFilterable interface { - IncludeInOp(walkOperation) bool -} - -// EvalNodeFilterOp returns a filter function that filters nodes that -// include themselves in specific operations. -func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc { - return func(n EvalNode) EvalNode { - include := true - if of, ok := n.(EvalNodeOpFilterable); ok { - include = of.IncludeInOp(op) - } - if include { - return n - } - - return EvalNoop{} - } -} - -// EvalOpFilter is an EvalNode implementation that is a proxy to -// another node but filters based on the operation. -type EvalOpFilter struct { - // Ops is the list of operations to include this node in. - Ops []walkOperation - - // Node is the node to execute - Node EvalNode -} - -// TODO: test -func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) { - return EvalRaw(n.Node, ctx) -} - -// EvalNodeOpFilterable impl. -func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { - for _, v := range n.Ops { - if v == op { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go deleted file mode 100644 index d6b46a1..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go +++ /dev/null @@ -1,26 +0,0 @@ -package terraform - -// EvalIf is an EvalNode that is a conditional.
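
As a rough, self-contained analogy for the conditional node defined below (evalIf and evalFunc are hypothetical names, not part of the deleted file):

    package main

    import "fmt"

    type evalFunc func() (interface{}, error)

    // evalIf mirrors the shape of EvalIf: run the Then branch when the
    // condition holds, and the optional Else branch otherwise.
    func evalIf(cond func() (bool, error), then, els evalFunc) (interface{}, error) {
        yes, err := cond()
        if err != nil {
            return nil, err
        }
        if yes {
            return then()
        }
        if els != nil {
            return els()
        }
        return nil, nil
    }

    func main() {
        out, _ := evalIf(
            func() (bool, error) { return true, nil },
            func() (interface{}, error) { return "then branch", nil },
            nil,
        )
        fmt.Println(out) // then branch
    }
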
-type EvalIf struct { - If func(EvalContext) (bool, error) - Then EvalNode - Else EvalNode -} - -// TODO: test -func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { - yes, err := n.If(ctx) - if err != nil { - return nil, err - } - - if yes { - return EvalRaw(n.Then, ctx) - } else { - if n.Else != nil { - return EvalRaw(n.Else, ctx) - } - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go deleted file mode 100644 index 62cc581..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go +++ /dev/null @@ -1,76 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// EvalImportState is an EvalNode implementation that performs an -// ImportState operation on a provider. This will return the imported -// states but won't modify any actual state. -type EvalImportState struct { - Provider *ResourceProvider - Info *InstanceInfo - Id string - Output *[]*InstanceState -} - -// TODO: test -func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - - { - // Call pre-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(n.Info, n.Id) - }) - if err != nil { - return nil, err - } - } - - // Import! - state, err := provider.ImportState(n.Info, n.Id) - if err != nil { - return nil, fmt.Errorf( - "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) - } - - if n.Output != nil { - *n.Output = state - } - - { - // Call post-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(n.Info, state) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalImportStateVerify verifies the state after ImportState and -// after the refresh to make sure it is non-nil and valid. -type EvalImportStateVerify struct { - Info *InstanceInfo - Id string - State **InstanceState -} - -// TODO: test -func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - if state.Empty() { - return nil, fmt.Errorf( - "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ - "exist. Please verify the ID is correct. You cannot import non-existent\n"+ - "resources using Terraform import.", - n.Info.HumanId(), - n.Id) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go deleted file mode 100644 index df3bcb9..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go +++ /dev/null @@ -1,53 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" -) - -// EvalInterpolate is an EvalNode implementation that takes a raw -// configuration and interpolates it. -type EvalInterpolate struct { - Config *config.RawConfig - Resource *Resource - Output **ResourceConfig -} - -func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { - rc, err := ctx.Interpolate(n.Config, n.Resource) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = rc - } - - return nil, nil -} - -// EvalTryInterpolate is an EvalNode implementation that takes a raw -// configuration and interpolates it, but only logs a warning on an -// interpolation error, and stops further Eval steps. -// This is used during Input where a value may not be known before Refresh, but -// we don't want to block Input. 
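
The contrast with EvalInterpolate above (warn and stop, rather than failing the walk) can be sketched as follows; the error sentinel and the interpolate helper are simplified stand-ins, not the patch's types:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    var errEarlyExit = errors.New("early exit")

    func interpolate(raw string) (string, error) {
        if raw == "${unknown}" {
            return "", fmt.Errorf("value not yet known")
        }
        return raw, nil
    }

    // tryInterpolate logs a warning on failure and signals the caller to
    // stop further steps, rather than aborting with a hard error.
    func tryInterpolate(raw string) (string, error) {
        out, err := interpolate(raw)
        if err != nil {
            log.Printf("[WARN] Interpolation %q failed: %s", raw, err)
            return "", errEarlyExit
        }
        return out, nil
    }

    func main() {
        _, err := tryInterpolate("${unknown}")
        fmt.Println(errors.Is(err, errEarlyExit)) // true
    }
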
-type EvalTryInterpolate struct { - Config *config.RawConfig - Resource *Resource - Output **ResourceConfig -} - -func (n *EvalTryInterpolate) Eval(ctx EvalContext) (interface{}, error) { - rc, err := ctx.Interpolate(n.Config, n.Resource) - if err != nil { - log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err) - return nil, EvalEarlyExitError{} - } - - if n.Output != nil { - *n.Output = rc - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go deleted file mode 100644 index f4bc822..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// EvalNoop is an EvalNode that does nothing. -type EvalNoop struct{} - -func (EvalNoop) Eval(EvalContext) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go deleted file mode 100644 index cf61781..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ /dev/null @@ -1,119 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/config" -) - -// EvalDeleteOutput is an EvalNode implementation that deletes an output -// from the state. -type EvalDeleteOutput struct { - Name string -} - -// TODO: test -func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - if state == nil { - return nil, nil - } - - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, there is nothing to delete. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - delete(mod.Outputs, n.Name) - - return nil, nil -} - -// EvalWriteOutput is an EvalNode implementation that writes the output -// for the given name to the current state. -type EvalWriteOutput struct { - Name string - Sensitive bool - Value *config.RawConfig -} - -// TODO: test -func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - cfg, err := ctx.Interpolate(n.Value, nil) - if err != nil { - // Log error but continue anyway - log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) - } - - state, lock := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") - } - - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) - } - - // Get the value from the config - var valueRaw interface{} = config.UnknownVariableValue - if cfg != nil { - var ok bool - valueRaw, ok = cfg.Get("value") - if !ok { - valueRaw = "" - } - if cfg.IsComputed("value") { - valueRaw = config.UnknownVariableValue - } - } - - switch valueTyped := valueRaw.(type) { - case string: - mod.Outputs[n.Name] = &OutputState{ - Type: "string", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "list", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case map[string]interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "map", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []map[string]interface{}: - // an HCL map is multi-valued, so if this was read out of a config the - // map may still be in a slice.
- if len(valueTyped) == 1 { - mod.Outputs[n.Name] = &OutputState{ - Type: "map", - Sensitive: n.Sensitive, - Value: valueTyped[0], - } - break - } - return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map", - n.Name, valueTyped, len(valueTyped)) - default: - return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go deleted file mode 100644 index 092fd18..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go +++ /dev/null @@ -1,164 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// EvalSetProviderConfig sets the parent configuration for a provider -// without configuring that provider, validating it, etc. -type EvalSetProviderConfig struct { - Provider string - Config **ResourceConfig -} - -func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) { - return nil, ctx.SetProviderConfig(n.Provider, *n.Config) -} - -// EvalBuildProviderConfig outputs a *ResourceConfig that is properly -// merged with parents and inputs on top of what is configured in the file. -type EvalBuildProviderConfig struct { - Provider string - Config **ResourceConfig - Output **ResourceConfig -} - -func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { - cfg := *n.Config - - // If we have a configuration set, then merge that in - if input := ctx.ProviderInput(n.Provider); input != nil { - // "input" is a map of the subset of config values that were known - // during the input walk, set by EvalInputProvider. Note that - // in particular it does *not* include attributes that had - // computed values at input time; those appear *only* in - // "cfg" here. - rc, err := config.NewRawConfig(input) - if err != nil { - return nil, err - } - - merged := cfg.raw.Merge(rc) - cfg = NewResourceConfig(merged) - } - - // Get the parent configuration if there is one - if parent := ctx.ParentProviderConfig(n.Provider); parent != nil { - merged := cfg.raw.Merge(parent.raw) - cfg = NewResourceConfig(merged) - } - - *n.Output = cfg - return nil, nil -} - -// EvalConfigProvider is an EvalNode implementation that configures -// a provider that is already initialized and retrieved. -type EvalConfigProvider struct { - Provider string - Config **ResourceConfig -} - -func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - return nil, ctx.ConfigureProvider(n.Provider, *n.Config) -} - -// EvalInitProvider is an EvalNode implementation that initializes a provider -// and returns nothing. The provider can be retrieved again with the -// EvalGetProvider node. -type EvalInitProvider struct { - Name string -} - -func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.Name) -} - -// EvalCloseProvider is an EvalNode implementation that closes provider -// connections that aren't needed anymore. -type EvalCloseProvider struct { - Name string -} - -func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Name) - return nil, nil -} - -// EvalGetProvider is an EvalNode implementation that retrieves an already -// initialized provider instance for the given name. 
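
The merge order in EvalBuildProviderConfig above (file config first, then saved input, then the parent configuration overriding both) can be approximated over plain maps; the real code merges RawConfig values, not bare maps:

    package main

    import "fmt"

    // merge overlays b on top of a, roughly what the RawConfig Merge
    // calls above do for provider configuration.
    func merge(a, b map[string]interface{}) map[string]interface{} {
        out := make(map[string]interface{}, len(a)+len(b))
        for k, v := range a {
            out[k] = v
        }
        for k, v := range b {
            out[k] = v
        }
        return out
    }

    func main() {
        file := map[string]interface{}{"region": "us-east-1"}
        input := map[string]interface{}{"profile": "dev"}
        parent := map[string]interface{}{"region": "eu-west-1"}
        fmt.Println(merge(merge(file, input), parent))
        // map[profile:dev region:eu-west-1]
    }
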
-type EvalGetProvider struct { - Name string - Output *ResourceProvider -} - -func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provider(n.Name) - if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - return nil, nil -} - -// EvalInputProvider is an EvalNode implementation that asks for input -// for the given provider configurations. -type EvalInputProvider struct { - Name string - Provider *ResourceProvider - Config **ResourceConfig -} - -func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { - // If we already configured this provider, then don't do this again - if v := ctx.ProviderInput(n.Name); v != nil { - return nil, nil - } - - rc := *n.Config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: fmt.Sprintf("provider.%s", n.Name), - QueryPrefix: fmt.Sprintf("provider.%s.", n.Name), - UIInput: ctx.Input(), - } - - // Go through each provider and capture the input necessary - // to satisfy it. - config, err := (*n.Provider).Input(input, rc) - if err != nil { - return nil, fmt.Errorf( - "Error configuring %s: %s", n.Name, err) - } - - // Set the input that we received so that child modules don't attempt - // to ask for input again. - if config != nil && len(config.Config) > 0 { - // This repository of provider input results on the context doesn't - // retain config.ComputedKeys, so we need to filter those out here - // in order that later users of this data won't try to use the unknown - // value placeholder as if it were a literal value. This map is just - // of known values we've been able to complete so far; dynamic stuff - // will be merged in by EvalBuildProviderConfig on subsequent - // (post-input) walks. - confMap := config.Config - if config.ComputedKeys != nil { - for _, key := range config.ComputedKeys { - delete(confMap, key) - } - } - - ctx.SetProviderInput(n.Name, confMap) - } else { - ctx.SetProviderInput(n.Name, map[string]interface{}{}) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go deleted file mode 100644 index 89579c0..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go +++ /dev/null @@ -1,47 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner -// and returns nothing. The provisioner can be retrieved again with the -// EvalGetProvisioner node. -type EvalInitProvisioner struct { - Name string -} - -func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvisioner(n.Name) -} - -// EvalCloseProvisioner is an EvalNode implementation that closes provisioner -// connections that aren't needed anymore. -type EvalCloseProvisioner struct { - Name string -} - -func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvisioner(n.Name) - return nil, nil -} - -// EvalGetProvisioner is an EvalNode implementation that retrieves an already -// initialized provisioner instance for the given name. 
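
Filtering computed keys out of the saved provider input, as EvalInputProvider above does before storing it, amounts to this minimal sketch over a plain map (dropComputed is a hypothetical helper):

    package main

    import "fmt"

    // dropComputed removes keys whose values were still unknown at input
    // time, so only completed literal values are retained.
    func dropComputed(conf map[string]interface{}, computedKeys []string) map[string]interface{} {
        for _, key := range computedKeys {
            delete(conf, key)
        }
        return conf
    }

    func main() {
        conf := map[string]interface{}{"token": "abc", "endpoint": "${computed}"}
        fmt.Println(dropComputed(conf, []string{"endpoint"})) // map[token:abc]
    }
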
-type EvalGetProvisioner struct { - Name string - Output *ResourceProvisioner -} - -func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provisioner(n.Name) - if result == nil { - return nil, fmt.Errorf("provisioner %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go deleted file mode 100644 index fb85a28..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go +++ /dev/null @@ -1,139 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// EvalReadDataDiff is an EvalNode implementation that executes a data -// resource's ReadDataDiff method to discover what attributes it exports. -type EvalReadDataDiff struct { - Provider *ResourceProvider - Output **InstanceDiff - OutputState **InstanceState - Config **ResourceConfig - Info *InstanceInfo - - // Set Previous when re-evaluating diff during apply, to ensure that - // the "Destroy" flag is preserved. - Previous **InstanceDiff -} - -func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, nil) - }) - if err != nil { - return nil, err - } - - var diff *InstanceDiff - - if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { - // If we're re-diffing for a diff that was already planning to - // destroy, then we'll just continue with that plan. - diff = &InstanceDiff{Destroy: true} - } else { - provider := *n.Provider - config := *n.Config - - var err error - diff, err = provider.ReadDataDiff(n.Info, config) - if err != nil { - return nil, err - } - if diff == nil { - diff = new(InstanceDiff) - } - - // if id isn't explicitly set then it's always computed, because we're - // always "creating a new resource". - diff.init() - if _, ok := diff.Attributes["id"]; !ok { - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: "", - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, - }) - } - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) - }) - if err != nil { - return nil, err - } - - *n.Output = diff - - if n.OutputState != nil { - state := &InstanceState{} - *n.OutputState = state - - // Apply the diff to the returned state, so the state includes - // any attribute values that are not computed. - if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) - } - } - - return nil, nil -} - -// EvalReadDataApply is an EvalNode implementation that executes a data -// resource's ReadDataApply method to read data from the data source. -type EvalReadDataApply struct { - Provider *ResourceProvider - Output **InstanceState - Diff **InstanceDiff - Info *InstanceInfo -} - -func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test - provider := *n.Provider - diff := *n.Diff - - // If the diff is for *destroying* this resource then we'll - // just drop its state and move on, since data resources don't - // support an actual "destroy" action. - if diff != nil && diff.GetDestroy() { - if n.Output != nil { - *n.Output = nil - } - return nil, nil - } - - // For the purpose of external hooks we present a data apply as a - // "Refresh" rather than an "Apply" because creating a data source - // is presented to users/callers as a "read" operation. 
- err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(n.Info, &InstanceState{}) - }) - if err != nil { - return nil, err - } - - state, err := provider.ReadDataApply(n.Info, diff) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err) - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = state - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go deleted file mode 100644 index fa2b812..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "fmt" - "log" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. -type EvalRefresh struct { - Provider *ResourceProvider - State **InstanceState - Info *InstanceInfo - Output **InstanceState -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - state := *n.State - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) - return nil, nil - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(n.Info, state) - }) - if err != nil { - return nil, err - } - - // Refresh! - state, err = provider.Refresh(n.Info, state) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) - } - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = state - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go deleted file mode 100644 index 5eca678..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go +++ /dev/null @@ -1,13 +0,0 @@ -package terraform - -// EvalInstanceInfo is an EvalNode implementation that fills in the -// InstanceInfo as much as it can. -type EvalInstanceInfo struct { - Info *InstanceInfo -} - -// TODO: test -func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) { - n.Info.ModulePath = ctx.Path() - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go deleted file mode 100644 index 82d8178..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -// EvalSequence is an EvalNode that evaluates in sequence. -type EvalSequence struct { - Nodes []EvalNode -} - -func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { - for _, n := range n.Nodes { - if n == nil { - continue - } - - if _, err := EvalRaw(n, ctx); err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalNodeFilterable impl. 
-func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { - for i, node := range n.Nodes { - n.Nodes[i] = fn(node) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go deleted file mode 100644 index 1f67e3d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ /dev/null @@ -1,326 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// EvalReadState is an EvalNode implementation that reads the -// primary InstanceState for a specific resource out of the state. -type EvalReadState struct { - Name string - Output **InstanceState -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { - return rs.Primary, nil - }) -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - Name string - Output **InstanceState - // Index indicates which instance in the Deposed list to target, or -1 for - // the last item. - Index int -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { - // Get the index. If it is negative, then we get the last one - idx := n.Index - if idx < 0 { - idx = len(rs.Deposed) - 1 - } - if idx >= 0 && idx < len(rs.Deposed) { - return rs.Deposed[idx], nil - } else { - return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs) - } - }) -} - -// Does the bulk of the work for the various flavors of ReadState eval nodes. -// Each node just provides a reader function to get from the ResourceState to the -// InstanceState, and this takes care of all the plumbing. -func readInstanceFromState( - ctx EvalContext, - resourceName string, - output **InstanceState, - readerFn func(*ResourceState) (*InstanceState, error), -) (*InstanceState, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[resourceName] - if rs == nil { - return nil, nil - } - - // Use the delegate function to get the instance state from the resource state - is, err := readerFn(rs) - if err != nil { - return nil, err - } - - // Write the result to the output pointer - if output != nil { - *output = is - } - - return is, nil -} - -// EvalRequireState is an EvalNode implementation that early exits -// if the state doesn't have an ID. -type EvalRequireState struct { - State **InstanceState -} - -func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - return nil, EvalEarlyExitError{} - } - - state := *n.State - if state == nil || state.ID == "" { - return nil, EvalEarlyExitError{} - } - - return nil, nil -} - -// EvalUpdateStateHook is an EvalNode implementation that calls the -// PostStateUpdate hook with the current state. -type EvalUpdateStateHook struct{} - -func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a full lock. 
Even calling something like WriteState can modify - // (prune) the state, so we need the full lock. - lock.Lock() - defer lock.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - if err != nil { - return nil, err - } - - return nil, nil -} - -// EvalWriteState is an EvalNode implementation that writes the -// primary InstanceState for a specific resource into the state. -type EvalWriteState struct { - Name string - ResourceType string - Provider string - Dependencies []string - State **InstanceState -} - -func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, - func(rs *ResourceState) error { - rs.Primary = *n.State - return nil - }, - ) -} - -// EvalWriteStateDeposed is an EvalNode implementation that writes -// an InstanceState out to the Deposed list of a resource in the state. -type EvalWriteStateDeposed struct { - Name string - ResourceType string - Provider string - Dependencies []string - State **InstanceState - // Index indicates which instance in the Deposed list to target, or -1 to append. - Index int -} - -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, - func(rs *ResourceState) error { - if n.Index == -1 { - rs.Deposed = append(rs.Deposed, *n.State) - } else { - rs.Deposed[n.Index] = *n.State - } - return nil - }, - ) -} - -// Pulls together the common tasks of the EvalWriteState nodes. All the args -// are passed directly down from the EvalNode along with a `writer` function -// which is yielded the *ResourceState and is responsible for writing an -// InstanceState to the proper field in the ResourceState. -func writeInstanceToState( - ctx EvalContext, - resourceName string, - resourceType string, - provider string, - dependencies []string, - writerFn func(*ResourceState) error, -) (*InstanceState, error) { - state, lock := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") - } - - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) - } - - // Look for the resource state. - rs := mod.Resources[resourceName] - if rs == nil { - rs = &ResourceState{} - rs.init() - mod.Resources[resourceName] = rs - } - rs.Type = resourceType - rs.Dependencies = dependencies - rs.Provider = provider - - if err := writerFn(rs); err != nil { - return nil, err - } - - return nil, nil -} - -// EvalClearPrimaryState is an EvalNode implementation that clears the primary -// instance from a resource state. -type EvalClearPrimaryState struct { - Name string -} - -func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. 
- rs := mod.Resources[n.Name] - if rs == nil { - return nil, nil - } - - // Clear primary from the resource state - rs.Primary = nil - - return nil, nil -} - -// EvalDeposeState is an EvalNode implementation that takes the primary -// out of a state and makes it Deposed. This is done at the beginning of -// create-before-destroy calls so that the create can create while preserving -// the old state of the to-be-destroyed resource. -type EvalDeposeState struct { - Name string -} - -// TODO: test -func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[n.Name] - if rs == nil { - return nil, nil - } - - // If we don't have a primary, we have nothing to depose - if rs.Primary == nil { - return nil, nil - } - - // Depose - rs.Deposed = append(rs.Deposed, rs.Primary) - rs.Primary = nil - - return nil, nil -} - -// EvalUndeposeState is an EvalNode implementation that restores the most -// recently deposed InstanceState for a specific resource as its primary. -type EvalUndeposeState struct { - Name string - State **InstanceState -} - -// TODO: test -func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[n.Name] - if rs == nil { - return nil, nil - } - - // If we don't have any deposed resource, then we don't have anything to do - if len(rs.Deposed) == 0 { - return nil, nil - } - - // Undepose - idx := len(rs.Deposed) - 1 - rs.Primary = rs.Deposed[idx] - rs.Deposed[idx] = *n.State - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go deleted file mode 100644 index 478aa64..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ /dev/null @@ -1,227 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/mitchellh/mapstructure" -) - -// EvalValidateError is the error structure returned if there were -// validation errors. -type EvalValidateError struct { - Warnings []string - Errors []error -} - -func (e *EvalValidateError) Error() string { - return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors) -} - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. -type EvalValidateCount struct { - Resource *config.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var count int - var errs []error - var err error - if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil { - errs = append(errs, fmt.Errorf( - "Failed to interpolate count: %s", err)) - goto RETURN - } - - count, err = n.Resource.Count() - if err != nil { - // If we can't get the count during validation, then - // just replace it with the number 1.
- c := n.Resource.RawCount.Config() - c[n.Resource.RawCount.Key] = "1" - count = 1 - } - err = nil - - if count < 0 { - errs = append(errs, fmt.Errorf( - "Count is less than zero: %d", count)) - } - -RETURN: - if len(errs) != 0 { - err = &EvalValidateError{ - Errors: errs, - } - } - return nil, err -} - -// EvalValidateProvider is an EvalNode implementation that validates -// the configuration of a provider. -type EvalValidateProvider struct { - Provider *ResourceProvider - Config **ResourceConfig -} - -func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - config := *n.Config - - warns, errs := provider.Validate(config) - if len(warns) == 0 && len(errs) == 0 { - return nil, nil - } - - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, - } -} - -// EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a provisioner. -type EvalValidateProvisioner struct { - Provisioner *ResourceProvisioner - Config **ResourceConfig - ConnConfig **ResourceConfig -} - -func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { - provisioner := *n.Provisioner - config := *n.Config - var warns []string - var errs []error - - { - // Validate the provisioner's own config first - w, e := provisioner.Validate(config) - warns = append(warns, w...) - errs = append(errs, e...) - } - - { - // Now validate the connection config, which might either be from - // the provisioner block itself or inherited from the resource's - // shared connection info. - w, e := n.validateConnConfig(*n.ConnConfig) - warns = append(warns, w...) - errs = append(errs, e...) - } - - if len(warns) == 0 && len(errs) == 0 { - return nil, nil - } - - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, - } -} - -func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - type connConfigSuperset struct { - // All attribute types are interface{} here because at this point we - // may still have unresolved interpolation expressions, which will - // appear as strings regardless of the final goal type.
- - Type interface{} `mapstructure:"type"` - User interface{} `mapstructure:"user"` - Password interface{} `mapstructure:"password"` - Host interface{} `mapstructure:"host"` - Port interface{} `mapstructure:"port"` - Timeout interface{} `mapstructure:"timeout"` - ScriptPath interface{} `mapstructure:"script_path"` - - // For type=ssh only (enforced in ssh communicator) - PrivateKey interface{} `mapstructure:"private_key"` - Agent interface{} `mapstructure:"agent"` - BastionHost interface{} `mapstructure:"bastion_host"` - BastionPort interface{} `mapstructure:"bastion_port"` - BastionUser interface{} `mapstructure:"bastion_user"` - BastionPassword interface{} `mapstructure:"bastion_password"` - BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` - - // For type=winrm only (enforced in winrm communicator) - HTTPS interface{} `mapstructure:"https"` - Insecure interface{} `mapstructure:"insecure"` - CACert interface{} `mapstructure:"cacert"` - } - - var metadata mapstructure.Metadata - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - Metadata: &metadata, - Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys - }) - if err != nil { - // should never happen - errs = append(errs, err) - return - } - - if err := decoder.Decode(connConfig.Config); err != nil { - errs = append(errs, err) - return - } - - for _, attrName := range metadata.Unused { - errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName)) - } - return -} - -// EvalValidateResource is an EvalNode implementation that validates -// the configuration of a resource. -type EvalValidateResource struct { - Provider *ResourceProvider - Config **ResourceConfig - ResourceName string - ResourceType string - ResourceMode config.ResourceMode - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. - IgnoreWarnings bool -} - -func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - cfg := *n.Config - var warns []string - var errs []error - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. - switch n.ResourceMode { - case config.ManagedResourceMode: - warns, errs = provider.ValidateResource(n.ResourceType, cfg) - case config.DataResourceMode: - warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) - } - - // If the resource name doesn't match the name regular - // expression, show an error. 
- if !config.NameRegexp.Match([]byte(n.ResourceName)) { - errs = append(errs, fmt.Errorf( - "%s: resource name can only contain letters, numbers, "+ - "dashes, and underscores.", n.ResourceName)) - } - - if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { - return nil, nil - } - - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go deleted file mode 100644 index ae4436a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// EvalValidateResourceSelfRef is an EvalNode implementation that validates that -// a configuration doesn't contain a reference to the resource itself. -// -// This must be done prior to interpolating configuration in order to avoid -// any infinite loop scenarios. -type EvalValidateResourceSelfRef struct { - Addr **ResourceAddress - Config **config.RawConfig -} - -func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) { - addr := *n.Addr - conf := *n.Config - - // Go through the variables and find self references - var errs []error - for k, raw := range conf.Variables { - rv, ok := raw.(*config.ResourceVariable) - if !ok { - continue - } - - // Build an address from the variable - varAddr := &ResourceAddress{ - Path: addr.Path, - Mode: rv.Mode, - Type: rv.Type, - Name: rv.Name, - Index: rv.Index, - InstanceType: TypePrimary, - } - - // If the variable access is a multi-access (*), then we just - // match the index so that we'll match our own addr if everything - // else matches. - if rv.Multi && rv.Index == -1 { - varAddr.Index = addr.Index - } - - // This is a weird thing where ResourceAddress has index "-1" when - // index isn't set at all. This means index "0" for resource access. - // So, if we have this scenario, just set our varAddr to -1 so it - // matches. - if addr.Index == -1 && varAddr.Index == 0 { - varAddr.Index = -1 - } - - // If the addresses match, then this is a self reference - if varAddr.Equals(addr) && varAddr.Index == addr.Index { - errs = append(errs, fmt.Errorf( - "%s: self reference not allowed: %q", - addr, k)) - } - } - - // If no errors, no errors! - if len(errs) == 0 { - return nil, nil - } - - // Wrap the errors in the proper wrapper so we can handle validation - // formatting properly upstream. - return nil, &EvalValidateError{ - Errors: errs, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go deleted file mode 100644 index e39a33c..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ /dev/null @@ -1,279 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "reflect" - "strconv" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/hilmapstructure" -) - -// EvalTypeCheckVariable is an EvalNode which ensures that the variable -// values which are assigned as inputs to a module (including the root) -// match the types which are either declared for the variables explicitly -// or inferred from the default values.
-// -// In order to achieve this three things are required: -// - a map of the proposed variable values -// - the configuration tree of the module in which the variable is -// declared -// - the path to the module (so we know which part of the tree to -// compare the values against). -type EvalTypeCheckVariable struct { - Variables map[string]interface{} - ModulePath []string - ModuleTree *module.Tree -} - -func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { - currentTree := n.ModuleTree - for _, pathComponent := range n.ModulePath[1:] { - currentTree = currentTree.Children()[pathComponent] - } - targetConfig := currentTree.Config() - - prototypes := make(map[string]config.VariableType) - for _, variable := range targetConfig.Variables { - prototypes[variable.Name] = variable.Type() - } - - // Only display a module in an error message if we are not in the root module - modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], ".")) - if len(n.ModulePath) == 1 { - modulePathDescription = "" - } - - for name, declaredType := range prototypes { - proposedValue, ok := n.Variables[name] - if !ok { - // This means the default value should be used as no overriding value - // has been set. Therefore we should continue as no check is necessary. - continue - } - - if proposedValue == config.UnknownVariableValue { - continue - } - - switch declaredType { - case config.VariableTypeString: - switch proposedValue.(type) { - case string: - continue - default: - return nil, fmt.Errorf("variable %s%s should be type %s, got %s", - name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) - } - case config.VariableTypeMap: - switch proposedValue.(type) { - case map[string]interface{}: - continue - default: - return nil, fmt.Errorf("variable %s%s should be type %s, got %s", - name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) - } - case config.VariableTypeList: - switch proposedValue.(type) { - case []interface{}: - continue - default: - return nil, fmt.Errorf("variable %s%s should be type %s, got %s", - name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) - } - default: - return nil, fmt.Errorf("variable %s%s should be type %s, got type string", - name, modulePathDescription, declaredType.Printable()) - } - } - - return nil, nil -} - -// EvalSetVariables is an EvalNode implementation that sets the variables -// explicitly for interpolation later. -type EvalSetVariables struct { - Module *string - Variables map[string]interface{} -} - -// TODO: test -func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetVariables(*n.Module, n.Variables) - return nil, nil -} - -// EvalVariableBlock is an EvalNode implementation that evaluates the -// given configuration, and uses the final values as a way to set the -// mapping. 
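
The acceptance rule the block below enforces, namely that every variable value must decode to a string, list, or map, can be approximated with a reflection switch; hilmapstructure's weak decoding is elided and classify is a hypothetical helper:

    package main

    import (
        "fmt"
        "reflect"
    )

    // classify mirrors the kind switch in EvalVariableBlock: slices become
    // lists, maps stay maps, and everything else is weakly coerced to string.
    func classify(v interface{}) string {
        switch reflect.ValueOf(v).Kind() {
        case reflect.Slice:
            return "list"
        case reflect.Map:
            return "map"
        default:
            return "string"
        }
    }

    func main() {
        for _, v := range []interface{}{"a", []interface{}{1, 2}, map[string]interface{}{"k": "v"}} {
            fmt.Println(classify(v))
        }
    }
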
-type EvalVariableBlock struct { - Config **ResourceConfig - VariableValues map[string]interface{} -} - -func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { - // Clear out the existing mapping - for k := range n.VariableValues { - delete(n.VariableValues, k) - } - - // Get our configuration - rc := *n.Config - for k, v := range rc.Config { - vKind := reflect.ValueOf(v).Type().Kind() - - switch vKind { - case reflect.Slice: - var vSlice []interface{} - if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil { - n.VariableValues[k] = vSlice - continue - } - case reflect.Map: - var vMap map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &vMap); err == nil { - n.VariableValues[k] = vMap - continue - } - default: - var vString string - if err := hilmapstructure.WeakDecode(v, &vString); err == nil { - n.VariableValues[k] = vString - continue - } - } - - return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) - } - - for _, path := range rc.ComputedKeys { - log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path) - err := n.setUnknownVariableValueForPath(path) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { - pathComponents := strings.Split(path, ".") - - if len(pathComponents) < 1 { - return fmt.Errorf("No path components in %s", path) - } - - if len(pathComponents) == 1 { - // Special case the "top level" since we know the type - if _, ok := n.VariableValues[pathComponents[0]]; !ok { - n.VariableValues[pathComponents[0]] = config.UnknownVariableValue - } - return nil - } - - // Otherwise find the correct point in the tree and then set to unknown - var current interface{} = n.VariableValues[pathComponents[0]] - for i := 1; i < len(pathComponents); i++ { - switch tCurrent := current.(type) { - case []interface{}: - index, err := strconv.Atoi(pathComponents[i]) - if err != nil { - return fmt.Errorf("Cannot convert %s to slice index in path %s", - pathComponents[i], path) - } - current = tCurrent[index] - case []map[string]interface{}: - index, err := strconv.Atoi(pathComponents[i]) - if err != nil { - return fmt.Errorf("Cannot convert %s to slice index in path %s", - pathComponents[i], path) - } - current = tCurrent[index] - case map[string]interface{}: - if val, hasVal := tCurrent[pathComponents[i]]; hasVal { - current = val - continue - } - - tCurrent[pathComponents[i]] = config.UnknownVariableValue - break - } - } - - return nil -} - -// EvalCoerceMapVariable is an EvalNode implementation that recognizes a -// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a -// bare map literal is indistinguishable from a list of maps w/ one element. -// -// We take all the same inputs as EvalTypeCheckVariable above, since we need -// both the target type and the proposed value in order to properly coerce. -type EvalCoerceMapVariable struct { - Variables map[string]interface{} - ModulePath []string - ModuleTree *module.Tree -} - -// Eval implements the EvalNode interface. See EvalCoerceMapVariable for -// details.
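
The HCL ambiguity described above makes the coercion in the function below essentially an unwrap of a one-element list of maps; a minimal sketch with a hypothetical coerceMap helper:

    package main

    import "fmt"

    // coerceMap unwraps a single-element list of maps into the map itself,
    // which is the resolution EvalCoerceMapVariable applies to map-typed
    // variables.
    func coerceMap(v interface{}) interface{} {
        if list, ok := v.([]interface{}); ok && len(list) == 1 {
            if m, ok := list[0].(map[string]interface{}); ok {
                return m
            }
        }
        return v
    }

    func main() {
        parsed := []interface{}{map[string]interface{}{"key": "value"}}
        fmt.Printf("%#v\n", coerceMap(parsed)) // map[string]interface {}{"key":"value"}
    }
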
-func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { - currentTree := n.ModuleTree - for _, pathComponent := range n.ModulePath[1:] { - currentTree = currentTree.Children()[pathComponent] - } - targetConfig := currentTree.Config() - - prototypes := make(map[string]config.VariableType) - for _, variable := range targetConfig.Variables { - prototypes[variable.Name] = variable.Type() - } - - for name, declaredType := range prototypes { - if declaredType != config.VariableTypeMap { - continue - } - - proposedValue, ok := n.Variables[name] - if !ok { - continue - } - - if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { - if m, ok := list[0].(map[string]interface{}); ok { - log.Printf("[DEBUG] EvalCoerceMapVariable: "+ - "Coercing single element list into map: %#v", m) - n.Variables[name] = m - } - } - } - - return nil, nil -} - -// hclTypeName returns the name of the type that would represent this value in -// a config file, or falls back to the Go type name if there's no corresponding -// HCL type. This is used for formatted output, not for comparing types. -func hclTypeName(i interface{}) string { - switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k { - case reflect.Bool: - return "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: - return "number" - case reflect.Array, reflect.Slice: - return "list" - case reflect.Map: - return "map" - case reflect.String: - return "string" - default: - // fall back to the Go type if there's no match - return k.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go deleted file mode 100644 index 00392ef..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ /dev/null @@ -1,119 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config" -) - -// ProviderEvalTree returns the evaluation tree for initializing and -// configuring providers. 
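[Editor's note] The ambiguity EvalCoerceMapVariable resolves above can be shown in isolation: HCL parses a bare map literal the same way as a one-element list of maps, so map-typed variables coerce that shape back into a plain map. A standalone sketch (names are illustrative):

package main

import "fmt"

// coerceMap turns a single-element list of maps back into a map,
// leaving every other shape untouched.
func coerceMap(v interface{}) interface{} {
	if list, ok := v.([]interface{}); ok && len(list) == 1 {
		if m, ok := list[0].(map[string]interface{}); ok {
			return m
		}
	}
	return v
}

func main() {
	// How HCL parses `tags { env = "prod" }`:
	parsed := []interface{}{map[string]interface{}{"env": "prod"}}
	fmt.Printf("%#v\n", coerceMap(parsed)) // map[string]interface {}{"env":"prod"}
}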
-func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { - var provider ResourceProvider - var resourceConfig *ResourceConfig - - seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{Name: n}) - - // Input stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkInput, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n, - Output: &provider, - }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalInputProvider{ - Name: n, - Provider: &provider, - Config: &resourceConfig, - }, - }, - }, - }) - - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n, - Output: &provider, - }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalValidateProvider{ - Provider: &provider, - Config: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, - }, - }, - }) - - // Apply stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n, - Output: &provider, - }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, - }, - }, - }) - - // We configure on everything but validate, since validate may - // not have access to all the variables. - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalConfigProvider{ - Provider: n, - Config: &resourceConfig, - }, - }, - }, - }) - - return &EvalSequence{Nodes: seq} -} - -// CloseProviderEvalTree returns the evaluation tree for closing -// provider connections that aren't needed anymore. -func CloseProviderEvalTree(n string) EvalNode { - return &EvalCloseProvider{Name: n} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go deleted file mode 100644 index 735ec4e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ /dev/null @@ -1,172 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "runtime/debug" - "strings" - - "github.com/hashicorp/terraform/dag" -) - -// RootModuleName is the name given to the root module implicitly. -const RootModuleName = "root" - -// RootModulePath is the path for the root module. -var RootModulePath = []string{RootModuleName} - -// Graph represents the graph that Terraform uses to represent resources -// and their dependencies. -type Graph struct { - // Graph is the actual DAG. This is embedded so you can call the DAG - // methods directly. - dag.AcyclicGraph - - // Path is the path in the module tree that this Graph represents. - // The root is represented by a single element list containing - // RootModuleName - Path []string - - // debugName is a name for reference in the debug output. This is usually - // to indicate what topmost builder was, and if this graph is a shadow or - // not. 
-	debugName string
-}
-
-func (g *Graph) DirectedGraph() dag.Grapher {
-	return &g.AcyclicGraph
-}
-
-// Walk walks the graph with the given walker for callbacks. The graph
-// will be walked with full parallelism, so the walker should expect
-// to be called concurrently.
-func (g *Graph) Walk(walker GraphWalker) error {
-	return g.walk(walker)
-}
-
-func (g *Graph) walk(walker GraphWalker) error {
-	// The callbacks for enter/exiting a graph
-	ctx := walker.EnterPath(g.Path)
-	defer walker.ExitPath(g.Path)
-
-	// Get the path for logs
-	path := strings.Join(ctx.Path(), ".")
-
-	// Determine if our walker is a panic wrapper
-	panicwrap, ok := walker.(GraphWalkerPanicwrapper)
-	if !ok {
-		panicwrap = nil // just to be sure
-	}
-
-	debugName := "walk-graph.json"
-	if g.debugName != "" {
-		debugName = g.debugName + "-" + debugName
-	}
-
-	debugBuf := dbug.NewFileWriter(debugName)
-	g.SetDebugWriter(debugBuf)
-	defer debugBuf.Close()
-
-	// Walk the graph.
-	var walkFn dag.WalkFunc
-	walkFn = func(v dag.Vertex) (rerr error) {
-		log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v))
-		g.DebugVisitInfo(v, g.debugName)
-
-		// If we have a panic wrap GraphWalker and a panic occurs, recover
-		// and call that. We ensure the return value is an error, however,
-		// so that future nodes are not called.
-		defer func() {
-			// If no panicwrap, do nothing
-			if panicwrap == nil {
-				return
-			}
-
-			// If no panic, do nothing
-			err := recover()
-			if err == nil {
-				return
-			}
-
-			// Modify the return value to show the error
-			rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
-				dag.VertexName(v), err, debug.Stack())
-
-			// Call the panic wrapper
-			panicwrap.Panic(v, err)
-		}()
-
-		walker.EnterVertex(v)
-		defer walker.ExitVertex(v, rerr)
-
-		// vertexCtx is the context that we use when evaluating. This
-		// is normally the context of our graph but can be overridden
-		// with a GraphNodeSubPath impl.
-		vertexCtx := ctx
-		if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
-			vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
-			defer walker.ExitPath(pn.Path())
-		}
-
-		// If the node is eval-able, then evaluate it.
-		if ev, ok := v.(GraphNodeEvalable); ok {
-			tree := ev.EvalTree()
-			if tree == nil {
-				panic(fmt.Sprintf(
-					"%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
-			}
-
-			// Allow the walker to change our tree if needed. Eval,
-			// then callback with the output.
- log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) - - tree = walker.EnterEvalTree(v, tree) - output, err := Eval(tree, vertexCtx) - if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf( - "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph", - path, - dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) - - g, err := ev.DynamicExpand(vertexCtx) - if err != nil { - rerr = err - return - } - if g != nil { - // Walk the subgraph - if rerr = g.walk(walker); rerr != nil { - return - } - } - } - - // If the node has a subgraph, then walk the subgraph - if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf( - "[TRACE] vertex '%s.%s': walking subgraph", - path, - dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - - if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { - return - } - } - - return nil - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go deleted file mode 100644 index 6374bb9..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(path []string) (*Graph, error) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. -type BasicGraphBuilder struct { - Steps []GraphTransformer - Validate bool - // Optional name to add to the graph debug log - Name string -} - -func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { - g := &Graph{Path: path} - - debugName := "graph.json" - if b.Name != "" { - debugName = b.Name + "-" + debugName - } - debugBuf := dbug.NewFileWriter(debugName) - g.SetDebugWriter(debugBuf) - defer debugBuf.Close() - - for _, step := range b.Steps { - if step == nil { - continue - } - - stepName := fmt.Sprintf("%T", step) - dot := strings.LastIndex(stepName, ".") - if dot >= 0 { - stepName = stepName[dot+1:] - } - - debugOp := g.DebugOperation(stepName, "") - err := step.Transform(g) - - errMsg := "" - if err != nil { - errMsg = err.Error() - } - debugOp.End(errMsg) - - log.Printf( - "[TRACE] Graph after step %T:\n\n%s", - step, g.StringWithNodeTypes()) - - if err != nil { - return g, err - } - } - - // Validate the graph structure - if b.Validate { - if err := g.Validate(); err != nil { - log.Printf("[ERROR] Graph validation failed. 
Graph:\n\n%s", g.String()) - return nil, err - } - } - - return g, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go deleted file mode 100644 index 38a90f2..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ /dev/null @@ -1,141 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// ApplyGraphBuilder implements GraphBuilder and is responsible for building -// a graph for applying a Terraform diff. -// -// Because the graph is built from the diff (vs. the config or state), -// this helps ensure that the apply-time graph doesn't modify any resources -// that aren't explicitly in the diff. There are other scenarios where the -// diff can be deviated, so this is just one layer of protection. -type ApplyGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree - - // Diff is the diff to apply. - Diff *Diff - - // State is the current state - State *State - - // Providers is the list of providers supported. - Providers []string - - // Provisioners is the list of provisioners supported. - Provisioners []string - - // Targets are resources to target. This is only required to make sure - // unnecessary outputs aren't included in the apply graph. The plan - // builder successfully handles targeting resources. In the future, - // outputs should go into the diff so that this is unnecessary. - Targets []string - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Destroy, if true, represents a pure destroy operation - Destroy bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "ApplyGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeApplyableResource{ - NodeAbstractResource: a, - } - } - - steps := []GraphTransformer{ - // Creates all the nodes represented in the diff. 
- &DiffTransformer{ - Concrete: concreteResource, - - Diff: b.Diff, - Module: b.Module, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{Module: b.Module, State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, - - // Destruction ordering - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, - GraphTransformIf( - func() bool { return !b.Destroy }, - &CBDEdgeTransformer{Module: b.Module, State: b.State}, - ), - - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, - &ProvisionerTransformer{}, - - // Add root variables - &RootVariableTransformer{Module: b.Module}, - - // Add the outputs - &OutputTransformer{Module: b.Module}, - - // Add module variables - &ModuleVariableTransformer{Module: b.Module}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{}, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go deleted file mode 100644 index 014b348..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go +++ /dev/null @@ -1,67 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for -// planning a pure-destroy. -// -// Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. -type DestroyPlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree - - // State is the current state - State *State - - // Targets are resources to target - Targets []string - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "DestroyPlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodePlanDestroyableResource{ - NodeAbstractResource: a, - } - } - - steps := []GraphTransformer{ - // Creates all the nodes represented in the state. - &StateTransformer{ - Concrete: concreteResource, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, - - // Destruction ordering. 
We require this only so that - // targeting below will prune the correct things. - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, - - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. - &TargetsTransformer{Targets: b.Targets}, - - // Single root - &RootTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go deleted file mode 100644 index 7070c59..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ /dev/null @@ -1,76 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// ImportGraphBuilder implements GraphBuilder and is responsible for building -// a graph for importing resources into Terraform. This is a much, much -// simpler graph than a normal configuration graph. -type ImportGraphBuilder struct { - // ImportTargets are the list of resources to import. - ImportTargets []*ImportTarget - - // Module is the module to add to the graph. See ImportOpts.Module. - Module *module.Tree - - // Providers is the list of providers supported. - Providers []string -} - -// Build builds the graph according to the steps returned by Steps. -func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "ImportGraphBuilder", - }).Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. -func (b *ImportGraphBuilder) Steps() []GraphTransformer { - // Get the module. If we don't have one, we just use an empty tree - // so that the transform still works but does nothing. - mod := b.Module - if mod == nil { - mod = module.NewEmptyTree() - } - - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Module: mod}, - - // Add the import steps - &ImportStateTransformer{Targets: b.ImportTargets}, - - // Provider-related transformations - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: mod}, - - // This validates that the providers only depend on variables - &ImportProviderValidateTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Optimize - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go deleted file mode 100644 index 10fd8b1..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go +++ /dev/null @@ -1,30 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// InputGraphBuilder creates the graph for the input operation. -// -// Unlike other graph builders, this is a function since it currently modifies -// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be -// modified and should not be used for any other operations. 
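[Editor's note] All of these builders share one shape: Build delegates to BasicGraphBuilder with an ordered Steps() list, and each step is a GraphTransformer applied in sequence, stopping at the first error. A toy version of that pipeline, with a stand-in graph type rather than Terraform's dag.AcyclicGraph:

package main

import "fmt"

type graph struct{ nodes []string }

// transformer mirrors the GraphTransformer idea: mutate the graph in place.
type transformer interface {
	Transform(*graph) error
}

type addNode string

func (a addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, string(a))
	return nil
}

// build applies each step in order, stopping on the first error,
// just as BasicGraphBuilder.Build does above.
func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{addNode("provider.aws"), addNode("aws_instance.web")})
	fmt.Println(g.nodes, err) // [provider.aws aws_instance.web] <nil>
}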
-func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // convert this to an InputPlan - p.Input = true - - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. - p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - // We purposely don't set any more concrete fields since the remainder - // should be no-ops. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go deleted file mode 100644 index 9c7e4c1..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ /dev/null @@ -1,178 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// PlanGraphBuilder implements GraphBuilder and is responsible for building -// a graph for planning (creating a Terraform Diff). -// -// The primary difference between this graph and others: -// -// * Based on the config since it represents the target state -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type PlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree - - // State is the current state - State *State - - // Providers is the list of providers supported. - Providers []string - - // Provisioners is the list of provisioners supported. - Provisioners []string - - // Targets are resources to target - Targets []string - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool - - // Input represents that this builder is for an Input operation. - Input bool - - // CustomConcrete can be set to customize the node types created - // for various parts of the plan. This is useful in order to customize - // the plan behavior. 
- CustomConcrete bool - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceNodeFunc - - once sync.Once -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - b.once.Do(b.init) - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Module: b.Module, - }, - - // Add the outputs - &OutputTransformer{Module: b.Module}, - - // Add orphan resources - &OrphanResourceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Module: b.Module, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Add root variables - &RootVariableTransformer{Module: b.Module}, - - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, - - // Provisioner-related transformations. Only add these if requested. - GraphTransformIf( - func() bool { return b.Provisioners != nil }, - GraphTransformMulti( - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, - &ProvisionerTransformer{}, - ), - ), - - // Add module variables - &ModuleVariableTransformer{ - Module: b.Module, - Input: b.Input, - }, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). 
- steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} - -func (b *PlanGraphBuilder) init() { - // Do nothing if the user requests customizing the fields - if b.CustomConcrete { - return - } - - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go deleted file mode 100644 index 3d3e968..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ /dev/null @@ -1,171 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// RefreshGraphBuilder implements GraphBuilder and is responsible for building -// a graph for refreshing (updating the Terraform state). -// -// The primary difference between this graph and others: -// -// * Based on the state since it represents the only resources that -// need to be refreshed. -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type RefreshGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree - - // State is the current state - State *State - - // Providers is the list of providers supported. - Providers []string - - // Targets are resources to target - Targets []string - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "RefreshGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableManagedResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, - } - } - - concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResource: a, - } - } - - concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableDataResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, - } - } - - steps := []GraphTransformer{ - // Creates all the managed resources that aren't in the state, but only if - // we have a state already. No resources in state means there's not - // anything to refresh. 
- func() GraphTransformer { - if b.State.HasResources() { - return &ConfigTransformer{ - Concrete: concreteManagedResource, - Module: b.Module, - Unique: true, - ModeFilter: true, - Mode: config.ManagedResourceMode, - } - } - log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") - return nil - }(), - - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: concreteDataResource, - Module: b.Module, - Unique: true, - ModeFilter: true, - Mode: config.DataResourceMode, - }, - - // Add any fully-orphaned resources from config (ones that have been - // removed completely, not ones that are just orphaned due to a scaled-in - // count. - &OrphanResourceTransformer{ - Concrete: concreteManagedResourceInstance, - State: b.State, - Module: b.Module, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, - - // Add root variables - &RootVariableTransformer{Module: b.Module}, - - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, - - // Add the outputs - &OutputTransformer{Module: b.Module}, - - // Add module variables - &ModuleVariableTransformer{Module: b.Module}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go deleted file mode 100644 index 645ec7b..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go +++ /dev/null @@ -1,36 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// ValidateGraphBuilder creates the graph for the validate operation. -// -// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that -// we only have to validate what we'd normally plan anyways. The -// PlanGraphBuilder given will be modified so it shouldn't be used for anything -// else after calling this function. -func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. 
-	p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
-		return &NodeApplyableProvider{
-			NodeAbstractProvider: a,
-		}
-	}
-
-	p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
-		return &NodeValidatableResource{
-			NodeAbstractCountResource: &NodeAbstractCountResource{
-				NodeAbstractResource: a,
-			},
-		}
-	}
-
-	// We purposely don't set any other concrete types since they don't
-	// require validation.
-
-	return p
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
deleted file mode 100644
index 73e3821..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package terraform
-
-import "github.com/hashicorp/terraform/dag"
-
-// GraphDot returns the dot formatting of a visual representation of
-// the given Terraform graph.
-func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
-	return string(g.Dot(opts)), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
deleted file mode 100644
index 2897eb5..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package terraform
-
-// GraphNodeSubPath says that a node is part of a graph with a
-// different path, and the context should be adjusted accordingly.
-type GraphNodeSubPath interface {
-	Path() []string
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
deleted file mode 100644
index 34ce6f6..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform/dag"
-)
-
-// GraphWalker is an interface that can be implemented so that, when used
-// with Graph.Walk, the given callbacks are invoked under certain events.
-type GraphWalker interface {
-	EnterPath([]string) EvalContext
-	ExitPath([]string)
-	EnterVertex(dag.Vertex)
-	ExitVertex(dag.Vertex, error)
-	EnterEvalTree(dag.Vertex, EvalNode) EvalNode
-	ExitEvalTree(dag.Vertex, interface{}, error) error
-}
-
-// GraphWalkerPanicwrapper can be optionally implemented to catch panics
-// that occur while walking the graph. This is not generally recommended
-// since panics should crash Terraform and result in a bug report. However,
-// this is particularly useful for situations like the shadow graph where
-// you don't ever want to cause a panic.
-type GraphWalkerPanicwrapper interface {
-	GraphWalker
-
-	// Panic is called when a panic occurs. This will halt the panic from
-	// propagating so if the walker wants it to crash still it should panic
-	// again. This is called from within a defer so runtime/debug.Stack can
-	// be used to get the stack trace of the panic.
-	Panic(dag.Vertex, interface{})
-}
-
-// GraphWalkerPanicwrap wraps an existing GraphWalker to wrap and swallow
-// the panics. This doesn't lose the panics since the panics are still
-// returned as errors as part of a graph walk.
-func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
-	return &graphWalkerPanicwrapper{
-		GraphWalker: w,
-	}
-}
-
-type graphWalkerPanicwrapper struct {
-	GraphWalker
-}
-
-func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
-
-// NullGraphWalker is a GraphWalker implementation that does nothing.
-// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. -type NullGraphWalker struct{} - -func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath([]string) {} -func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} -func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go deleted file mode 100644 index e63b460..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ /dev/null @@ -1,157 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/dag" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. -type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - Operation walkOperation - StopContext context.Context - - // Outputs, do not set these. Do not read these while the graph - // is being walked. - ValidationWarnings []string - ValidationErrors []error - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - interpolaterVars map[string]map[string]interface{} - interpolaterVarLock sync.Mutex - providerCache map[string]ResourceProvider - providerConfigCache map[string]*ResourceConfig - providerLock sync.Mutex - provisionerCache map[string]ResourceProvisioner - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { - w.once.Do(w.init) - - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := PathCacheKey(path) - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - // Setup the variables for this interpolater - variables := make(map[string]interface{}) - if len(path) <= 1 { - for k, v := range w.Context.variables { - variables[k] = v - } - } - w.interpolaterVarLock.Lock() - if m, ok := w.interpolaterVars[key]; ok { - for k, v := range m { - variables[k] = v - } - } - w.interpolaterVars[key] = variables - w.interpolaterVarLock.Unlock() - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - PathValue: path, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - Components: w.Context.components, - ProviderCache: w.providerCache, - ProviderConfigCache: w.providerConfigCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - DiffValue: w.Context.diff, - DiffLock: &w.Context.diffLock, - StateValue: w.Context.state, - StateLock: &w.Context.stateLock, - Interpolater: &Interpolater{ - Operation: w.Operation, - Meta: w.Context.meta, - Module: w.Context.module, - State: w.Context.state, - StateLock: &w.Context.stateLock, - VariableValues: variables, - VariableValuesLock: &w.interpolaterVarLock, - }, - InterpolaterVars: w.interpolaterVars, - InterpolaterVarLock: &w.interpolaterVarLock, - } - - w.contexts[key] = ctx - return ctx -} - -func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - 
log.Printf("[TRACE] [%s] Entering eval tree: %s", - w.Operation, dag.VertexName(v)) - - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - - // We want to filter the evaluation tree to only include operations - // that belong in this operation. - return EvalFilter(n, EvalNodeFilterOp(w.Operation)) -} - -func (w *ContextGraphWalker) ExitEvalTree( - v dag.Vertex, output interface{}, err error) error { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", - w.Operation, dag.VertexName(v)) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. - w.errorLock.Lock() - defer w.errorLock.Unlock() - - // Try to get a validation error out of it. If its not a validation - // error, then just record the normal error. - verr, ok := err.(*EvalValidateError) - if !ok { - return err - } - - for _, msg := range verr.Warnings { - w.ValidationWarnings = append( - w.ValidationWarnings, - fmt.Sprintf("%s: %s", dag.VertexName(v), msg)) - } - for _, e := range verr.Errors { - w.ValidationErrors = append( - w.ValidationErrors, - errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e)) - } - - return nil -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext, 5) - w.providerCache = make(map[string]ResourceProvider, 5) - w.providerConfigCache = make(map[string]*ResourceConfig, 5) - w.provisionerCache = make(map[string]ResourceProvisioner, 5) - w.interpolaterVars = make(map[string]map[string]interface{}, 5) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go deleted file mode 100644 index 3fb3748..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -//go:generate stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. -type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkInput - walkApply - walkPlan - walkPlanDestroy - walkRefresh - walkValidate - walkDestroy - walkImport -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go deleted file mode 100644 index e97b485..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. - -package terraform - -import "fmt" - -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" - -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} - -func (i GraphType) String() string { - if i >= GraphType(len(_GraphType_index)-1) { - return fmt.Sprintf("GraphType(%d)", i) - } - return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go deleted file mode 100644 index ab11e8e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ /dev/null @@ -1,137 +0,0 @@ -package terraform - -// HookAction is an enum of actions that can be taken as a result of a hook -// callback. This allows you to modify the behavior of Terraform at runtime. 
-type HookAction byte
-
-const (
-	// HookActionContinue continues with processing as usual.
-	HookActionContinue HookAction = iota
-
-	// HookActionHalt halts immediately: no more hooks are processed
-	// and the action that Terraform was about to take is cancelled.
-	HookActionHalt
-)
-
-// Hook is the interface that must be implemented to hook into various
-// parts of Terraform, allowing you to inspect or change behavior at runtime.
-//
-// There are MANY hook points into Terraform. If you only want to implement
-// some hook points, but not all (which is the likely case), then embed the
-// NilHook into your struct, which implements all of the interface but does
-// nothing. Then, override only the functions you want to implement.
-type Hook interface {
-	// PreApply and PostApply are called before and after a single
-	// resource is applied. The error argument in PostApply is the
-	// error, if any, that was returned from the provider Apply call itself.
-	PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
-	PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
-
-	// PreDiff and PostDiff are called before and after a single
-	// resource is diffed.
-	PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
-
-	// Provisioning hooks
-	//
-	// All should be self-explanatory. ProvisionOutput is called with
-	// output sent back by the provisioners. This will be called multiple
-	// times as output comes in, but each call should represent a line of
-	// output. The ProvisionOutput method cannot control whether the
-	// hook continues running.
-	PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
-	PreProvision(*InstanceInfo, string) (HookAction, error)
-	PostProvision(*InstanceInfo, string, error) (HookAction, error)
-	ProvisionOutput(*InstanceInfo, string, string)
-
-	// PreRefresh and PostRefresh are called before and after a single
-	// resource state is refreshed, respectively.
-	PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
-
-	// PostStateUpdate is called after the state is updated.
-	PostStateUpdate(*State) (HookAction, error)
-
-	// PreImportState and PostImportState are called before and after
-	// a single resource's state is being imported.
-	PreImportState(*InstanceInfo, string) (HookAction, error)
-	PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
-}
-
-// NilHook is a Hook implementation that does nothing. It exists only to
-// simplify implementing hooks. You can embed this into your Hook implementation
-// and only implement the functions you are interested in.
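[Editor's note] The embedding advice in the Hook docs above is plain Go struct embedding: a no-op base satisfies the whole interface, and a concrete hook overrides only what it needs. A cut-down sketch with a two-method interface (the names here are illustrative, not Terraform's full Hook interface):

package main

import "fmt"

type Action int

const (
	Continue Action = iota
	Halt
)

type Hook interface {
	PreApply(resource string) (Action, error)
	PostApply(resource string, applyErr error) (Action, error)
}

// NilHook implements Hook and does nothing.
type NilHook struct{}

func (NilHook) PreApply(string) (Action, error)         { return Continue, nil }
func (NilHook) PostApply(string, error) (Action, error) { return Continue, nil }

// LogHook embeds NilHook and overrides just PreApply.
type LogHook struct{ NilHook }

func (LogHook) PreApply(resource string) (Action, error) {
	fmt.Println("about to apply", resource)
	return Continue, nil
}

func main() {
	var h Hook = LogHook{}
	h.PreApply("aws_instance.web")       // overridden
	h.PostApply("aws_instance.web", nil) // inherited no-op
}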
-type NilHook struct{} - -func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput( - *InstanceInfo, string, string) { -} - -func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(*State) (HookAction, error) { - return HookActionContinue, nil -} - -// handleHook turns hook actions into panics. This lets you use the -// panic/recover mechanism in Go as a flow control mechanism for hook -// actions. -func handleHook(a HookAction, err error) { - if err != nil { - // TODO: handle errors - } - - switch a { - case HookActionContinue: - return - case HookActionHalt: - panic(HookActionHalt) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go deleted file mode 100644 index 0e46400..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ /dev/null @@ -1,245 +0,0 @@ -package terraform - -import "sync" - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
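[Editor's note] handleHook above uses panic with a known sentinel value as flow control, relying on a recover at the call site. A self-contained sketch of that idiom (types and names are illustrative):

package main

import "fmt"

type Action int

const (
	Continue Action = iota
	Halt
)

// handle turns a halting action into a panic with a known sentinel.
func handle(a Action) {
	if a == Halt {
		panic(Halt)
	}
}

// runStep recovers only on our sentinel; any other panic is re-raised.
func runStep(a Action) (halted bool) {
	defer func() {
		if r := recover(); r != nil {
			if r == Halt {
				halted = true
				return
			}
			panic(r) // not ours: re-panic
		}
	}()
	handle(a)
	fmt.Println("step ran to completion")
	return false
}

func main() {
	fmt.Println("halted:", runStep(Continue)) // halted: false
	fmt.Println("halted:", runStep(Halt))     // halted: true
}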
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyInfo *InstanceInfo - PreApplyDiff *InstanceDiff - PreApplyState *InstanceState - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyInfo *InstanceInfo - PostApplyState *InstanceState - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) - - PreDiffCalled bool - PreDiffInfo *InstanceInfo - PreDiffState *InstanceState - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffInfo *InstanceInfo - PostDiffDiff *InstanceDiff - PostDiffReturn HookAction - PostDiffError error - - PreProvisionResourceCalled bool - PreProvisionResourceInfo *InstanceInfo - PreProvisionInstanceState *InstanceState - PreProvisionResourceReturn HookAction - PreProvisionResourceError error - - PostProvisionResourceCalled bool - PostProvisionResourceInfo *InstanceInfo - PostProvisionInstanceState *InstanceState - PostProvisionResourceReturn HookAction - PostProvisionResourceError error - - PreProvisionCalled bool - PreProvisionInfo *InstanceInfo - PreProvisionProvisionerId string - PreProvisionReturn HookAction - PreProvisionError error - - PostProvisionCalled bool - PostProvisionInfo *InstanceInfo - PostProvisionProvisionerId string - PostProvisionErrorArg error - PostProvisionReturn HookAction - PostProvisionError error - - ProvisionOutputCalled bool - ProvisionOutputInfo *InstanceInfo - ProvisionOutputProvisionerId string - ProvisionOutputMessage string - - PostRefreshCalled bool - PostRefreshInfo *InstanceInfo - PostRefreshState *InstanceState - PostRefreshReturn HookAction - PostRefreshError error - - PreRefreshCalled bool - PreRefreshInfo *InstanceInfo - PreRefreshState *InstanceState - PreRefreshReturn HookAction - PreRefreshError error - - PreImportStateCalled bool - PreImportStateInfo *InstanceInfo - PreImportStateId string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateInfo *InstanceInfo - PostImportStateState []*InstanceState - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyInfo = n - h.PreApplyDiff = d - h.PreApplyState = s - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyInfo = n - h.PostApplyState = s - h.PostApplyError = e - - if h.PostApplyFn != nil { - return h.PostApplyFn(n, s, e) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffInfo = n - h.PreDiffState = s - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffInfo = n - h.PostDiffDiff = d - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - 
h.PreProvisionResourceCalled = true - h.PreProvisionResourceInfo = n - h.PreProvisionInstanceState = s - return h.PreProvisionResourceReturn, h.PreProvisionResourceError -} - -func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionResourceCalled = true - h.PostProvisionResourceInfo = n - h.PostProvisionInstanceState = s - return h.PostProvisionResourceReturn, h.PostProvisionResourceError -} - -func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionCalled = true - h.PreProvisionInfo = n - h.PreProvisionProvisionerId = provId - return h.PreProvisionReturn, h.PreProvisionError -} - -func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionCalled = true - h.PostProvisionInfo = n - h.PostProvisionProvisionerId = provId - h.PostProvisionErrorArg = err - return h.PostProvisionReturn, h.PostProvisionError -} - -func (h *MockHook) ProvisionOutput( - n *InstanceInfo, - provId string, - msg string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputInfo = n - h.ProvisionOutputProvisionerId = provId - h.ProvisionOutputMessage = msg -} - -func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshInfo = n - h.PreRefreshState = s - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshInfo = n - h.PostRefreshState = s - return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateInfo = info - h.PreImportStateId = id - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateInfo = info - h.PostImportStateState = s - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = s - return h.PostStateUpdateReturn, h.PostStateUpdateError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go deleted file mode 100644 index 104d009..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "sync/atomic" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. 
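[Editor's note] The stopHook declared just below is an atomic flag checked on every callback. The core pattern in isolation, using sync/atomic rather than a mutex since the flag is read constantly from many goroutines:

package main

import (
	"fmt"
	"sync/atomic"
)

// stopFlag mirrors stopHook's state: Stop flips the flag, and every
// callback consults Stopped to decide whether to halt.
type stopFlag struct{ stop uint32 }

func (s *stopFlag) Stop()         { atomic.StoreUint32(&s.stop, 1) }
func (s *stopFlag) Stopped() bool { return atomic.LoadUint32(&s.stop) == 1 }

func main() {
	var s stopFlag
	fmt.Println(s.Stopped()) // false
	s.Stop()
	fmt.Println(s.Stopped()) // true
}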
-type stopHook struct {
-	stop uint32
-}
-
-func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
-}
-
-func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
-	return h.hook()
-}
-
-func (h *stopHook) hook() (HookAction, error) {
-	if h.Stopped() {
-		return HookActionHalt, nil
-	}
-
-	return HookActionContinue, nil
-}
-
-// Reset should be called within the lock context
-func (h *stopHook) Reset() {
-	atomic.StoreUint32(&h.stop, 0)
-}
-
-func (h *stopHook) Stop() {
-	atomic.StoreUint32(&h.stop, 1)
-}
-
-func (h *stopHook) Stopped() bool {
-	return atomic.LoadUint32(&h.stop) == 1
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
deleted file mode 100644
index 0895971..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package terraform
-
-//go:generate stringer -type=InstanceType instancetype.go
-
-// InstanceType is an enum of the various types of instances stored in the State
-type InstanceType int
-
-const (
-	TypeInvalid InstanceType = iota
-	TypePrimary
-	TypeTainted
-	TypeDeposed
-)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
deleted file mode 100644
index f69267c..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
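[Editor's note] The generated String method that follows uses stringer's usual trick: all constant names concatenated into one string, sliced via an index table, so no []string allocation is needed. The same technique stands alone:

package main

import "fmt"

type InstanceType int

const (
	TypeInvalid InstanceType = iota
	TypePrimary
	TypeTainted
	TypeDeposed
)

// One backing string plus an index table; each name is 11 bytes here.
const _name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"

var _index = [...]uint8{0, 11, 22, 33, 44}

func (i InstanceType) String() string {
	if i < 0 || i >= InstanceType(len(_index)-1) {
		return fmt.Sprintf("InstanceType(%d)", i)
	}
	return _name[_index[i]:_index[i+1]]
}

func main() {
	fmt.Println(TypeTainted) // TypeTainted
}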
- -package terraform - -import "fmt" - -const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" - -var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} - -func (i InstanceType) String() string { - if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) - } - return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go deleted file mode 100644 index 22ddce6..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ /dev/null @@ -1,799 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "os" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/flatmap" -) - -const ( - // VarEnvPrefix is the prefix of variables that are read from - // the environment to set variables here. - VarEnvPrefix = "TF_VAR_" -) - -// Interpolater is the structure responsible for determining the values -// for interpolations such as `aws_instance.foo.bar`. -type Interpolater struct { - Operation walkOperation - Meta *ContextMeta - Module *module.Tree - State *State - StateLock *sync.RWMutex - VariableValues map[string]interface{} - VariableValuesLock *sync.Mutex -} - -// InterpolationScope is the current scope of execution. This is required -// since some variables which are interpolated are dependent on what we're -// operating on and where we are. -type InterpolationScope struct { - Path []string - Resource *Resource -} - -// Values returns the values for all the variables in the given map. 
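An aside on the VarEnvPrefix constant above: a sketch of the kind of environment scan that typically consumes such a prefix (assumed behavior for illustration, not code from this file):

package main

import (
	"fmt"
	"os"
	"strings"
)

const varEnvPrefix = "TF_VAR_"

// varsFromEnv collects NAME=VALUE pairs from TF_VAR_NAME environment
// variables, the usual way CLI tools seed input variables.
func varsFromEnv() map[string]string {
	vars := map[string]string{}
	for _, kv := range os.Environ() {
		if !strings.HasPrefix(kv, varEnvPrefix) {
			continue
		}
		parts := strings.SplitN(strings.TrimPrefix(kv, varEnvPrefix), "=", 2)
		if len(parts) == 2 {
			vars[parts[0]] = parts[1]
		}
	}
	return vars
}

func main() {
	os.Setenv("TF_VAR_region", "eu-west-1")
	fmt.Println(varsFromEnv()["region"]) // eu-west-1
}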
-func (i *Interpolater) Values( - scope *InterpolationScope, - vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { - if scope == nil { - scope = &InterpolationScope{} - } - - result := make(map[string]ast.Variable, len(vars)) - - // Copy the default variables - if i.Module != nil && scope != nil { - mod := i.Module - if len(scope.Path) > 1 { - mod = i.Module.Child(scope.Path[1:]) - } - for _, v := range mod.Config().Variables { - // Set default variables - if v.Default == nil { - continue - } - - n := fmt.Sprintf("var.%s", v.Name) - variable, err := hil.InterfaceToVariable(v.Default) - if err != nil { - return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) - } - - result[n] = variable - } - } - - for n, rawV := range vars { - var err error - switch v := rawV.(type) { - case *config.CountVariable: - err = i.valueCountVar(scope, n, v, result) - case *config.ModuleVariable: - err = i.valueModuleVar(scope, n, v, result) - case *config.PathVariable: - err = i.valuePathVar(scope, n, v, result) - case *config.ResourceVariable: - err = i.valueResourceVar(scope, n, v, result) - case *config.SelfVariable: - err = i.valueSelfVar(scope, n, v, result) - case *config.SimpleVariable: - err = i.valueSimpleVar(scope, n, v, result) - case *config.TerraformVariable: - err = i.valueTerraformVar(scope, n, v, result) - case *config.UserVariable: - err = i.valueUserVar(scope, n, v, result) - default: - err = fmt.Errorf("%s: unknown variable type: %T", n, rawV) - } - - if err != nil { - return nil, err - } - } - - return result, nil -} - -func (i *Interpolater) valueCountVar( - scope *InterpolationScope, - n string, - v *config.CountVariable, - result map[string]ast.Variable) error { - switch v.Type { - case config.CountValueIndex: - if scope.Resource == nil { - return fmt.Errorf("%s: count.index is only valid within resources", n) - } - result[n] = ast.Variable{ - Value: scope.Resource.CountIndex, - Type: ast.TypeInt, - } - return nil - default: - return fmt.Errorf("%s: unknown count type: %#v", n, v.Type) - } -} - -func unknownVariable() ast.Variable { - return ast.Variable{ - Type: ast.TypeUnknown, - Value: config.UnknownVariableValue, - } -} - -func unknownValue() string { - return hil.UnknownValue -} - -func (i *Interpolater) valueModuleVar( - scope *InterpolationScope, - n string, - v *config.ModuleVariable, - result map[string]ast.Variable) error { - - // Build the path to the child module we want - path := make([]string, len(scope.Path), len(scope.Path)+1) - copy(path, scope.Path) - path = append(path, v.Name) - - // Grab the lock so that if other interpolations are running or - // state is being modified, we'll be safe. - i.StateLock.RLock() - defer i.StateLock.RUnlock() - - // Get the module where we're looking for the value - mod := i.State.ModuleByPath(path) - if mod == nil { - // If the module doesn't exist, then we can return an empty string. - // This happens usually only in Refresh() when we haven't populated - // a state. During validation, we semantically verify that all - // modules reference other modules, and graph ordering should - // ensure that the module is in the state, so if we reach this - // point otherwise it really is a panic. 
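Stepping back to the Values dispatch above: it is a plain type switch over the concrete variable types, each case filling the shared result map. A stripped-down sketch of the same shape, with hypothetical variable types:

package main

import "fmt"

type interpolatedVariable interface{ key() string }

type countVariable struct{}
type userVariable struct{ name string }

func (countVariable) key() string  { return "count.index" }
func (v userVariable) key() string { return "var." + v.name }

// resolve dispatches on the concrete variable type, exactly the shape
// of the Values loop: one case per variable kind, error on unknown.
func resolve(vars []interpolatedVariable) (map[string]string, error) {
	result := map[string]string{}
	for _, raw := range vars {
		switch v := raw.(type) {
		case countVariable:
			result[v.key()] = "0"
		case userVariable:
			result[v.key()] = "(value of " + v.name + ")"
		default:
			return nil, fmt.Errorf("unknown variable type: %T", v)
		}
	}
	return result, nil
}

func main() {
	m, _ := resolve([]interpolatedVariable{countVariable{}, userVariable{name: "region"}})
	fmt.Println(m)
}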
- result[n] = unknownVariable() - - // During apply this is always an error - if i.Operation == walkApply { - return fmt.Errorf( - "Couldn't find module %q for var: %s", - v.Name, v.FullKey()) - } - } else { - // Get the value from the outputs - if outputState, ok := mod.Outputs[v.Field]; ok { - output, err := hil.InterfaceToVariable(outputState.Value) - if err != nil { - return err - } - result[n] = output - } else { - // Same reasons as the comment above. - result[n] = unknownVariable() - - // During apply this is always an error - if i.Operation == walkApply { - return fmt.Errorf( - "Couldn't find output %q for module var: %s", - v.Field, v.FullKey()) - } - } - } - - return nil -} - -func (i *Interpolater) valuePathVar( - scope *InterpolationScope, - n string, - v *config.PathVariable, - result map[string]ast.Variable) error { - switch v.Type { - case config.PathValueCwd: - wd, err := os.Getwd() - if err != nil { - return fmt.Errorf( - "Couldn't get cwd for var %s: %s", - v.FullKey(), err) - } - - result[n] = ast.Variable{ - Value: wd, - Type: ast.TypeString, - } - case config.PathValueModule: - if t := i.Module.Child(scope.Path[1:]); t != nil { - result[n] = ast.Variable{ - Value: t.Config().Dir, - Type: ast.TypeString, - } - } - case config.PathValueRoot: - result[n] = ast.Variable{ - Value: i.Module.Config().Dir, - Type: ast.TypeString, - } - default: - return fmt.Errorf("%s: unknown path type: %#v", n, v.Type) - } - - return nil - -} - -func (i *Interpolater) valueResourceVar( - scope *InterpolationScope, - n string, - v *config.ResourceVariable, - result map[string]ast.Variable) error { - // If we're computing all dynamic fields, then module vars count - // and we mark it as computed. - if i.Operation == walkValidate { - result[n] = unknownVariable() - return nil - } - - var variable *ast.Variable - var err error - - if v.Multi && v.Index == -1 { - variable, err = i.computeResourceMultiVariable(scope, v) - } else { - variable, err = i.computeResourceVariable(scope, v) - } - - if err != nil { - return err - } - - if variable == nil { - // During the input walk we tolerate missing variables because - // we haven't yet had a chance to refresh state, so dynamic data may - // not yet be complete. - // If it truly is missing, we'll catch it on a later walk. - // This applies only to graph nodes that interpolate during the - // config walk, e.g. providers. - if i.Operation == walkInput || i.Operation == walkRefresh { - result[n] = unknownVariable() - return nil - } - - return fmt.Errorf("variable %q is nil, but no error was reported", v.Name) - } - - result[n] = *variable - return nil -} - -func (i *Interpolater) valueSelfVar( - scope *InterpolationScope, - n string, - v *config.SelfVariable, - result map[string]ast.Variable) error { - if scope == nil || scope.Resource == nil { - return fmt.Errorf( - "%s: invalid scope, self variables are only valid on resources", n) - } - - rv, err := config.NewResourceVariable(fmt.Sprintf( - "%s.%s.%d.%s", - scope.Resource.Type, - scope.Resource.Name, - scope.Resource.CountIndex, - v.Field)) - if err != nil { - return err - } - - return i.valueResourceVar(scope, n, rv, result) -} - -func (i *Interpolater) valueSimpleVar( - scope *InterpolationScope, - n string, - v *config.SimpleVariable, - result map[string]ast.Variable) error { - // This error message includes some information for people who - // relied on this for their template_file data sources. We should - // remove this at some point but there isn't any rush. 
- return fmt.Errorf( - "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+ - "then you must escape the interpolation with two dollar signs. For\n"+ - "example: ${a} becomes $${a}.", - n, n) -} - -func (i *Interpolater) valueTerraformVar( - scope *InterpolationScope, - n string, - v *config.TerraformVariable, - result map[string]ast.Variable) error { - - // "env" is supported for backward compatibility, but it's deprecated and - // so we won't advertise it as being allowed in the error message. It will - // be removed in a future version of Terraform. - if v.Field != "workspace" && v.Field != "env" { - return fmt.Errorf( - "%s: only supported key for 'terraform.X' interpolations is 'workspace'", n) - } - - if i.Meta == nil { - return fmt.Errorf( - "%s: internal error: nil Meta. Please report a bug.", n) - } - - result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env} - return nil -} - -func (i *Interpolater) valueUserVar( - scope *InterpolationScope, - n string, - v *config.UserVariable, - result map[string]ast.Variable) error { - i.VariableValuesLock.Lock() - defer i.VariableValuesLock.Unlock() - val, ok := i.VariableValues[v.Name] - if ok { - varValue, err := hil.InterfaceToVariable(val) - if err != nil { - return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", - v.Name, val, err) - } - result[n] = varValue - return nil - } - - if _, ok := result[n]; !ok && i.Operation == walkValidate { - result[n] = unknownVariable() - return nil - } - - // Look up if we have any variables with this prefix because - // those are map overrides. Include those. - for k, val := range i.VariableValues { - if strings.HasPrefix(k, v.Name+".") { - keyComponents := strings.Split(k, ".") - overrideKey := keyComponents[len(keyComponents)-1] - - mapInterface, ok := result["var."+v.Name] - if !ok { - return fmt.Errorf("override for non-existent variable: %s", v.Name) - } - - mapVariable := mapInterface.Value.(map[string]ast.Variable) - - varValue, err := hil.InterfaceToVariable(val) - if err != nil { - return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", - v.Name, val, err) - } - mapVariable[overrideKey] = varValue - } - } - - return nil -} - -func (i *Interpolater) computeResourceVariable( - scope *InterpolationScope, - v *config.ResourceVariable) (*ast.Variable, error) { - id := v.ResourceId() - if v.Multi { - id = fmt.Sprintf("%s.%d", id, v.Index) - } - - i.StateLock.RLock() - defer i.StateLock.RUnlock() - - unknownVariable := unknownVariable() - - // These variables must be declared early because of the use of GOTO - var isList bool - var isMap bool - - // Get the information about this resource variable, and verify - // that it exists and such. - module, cr, err := i.resourceVariableInfo(scope, v) - if err != nil { - return nil, err - } - - // If we're requesting "count" its a special variable that we grab - // directly from the config itself. - if v.Field == "count" { - var count int - if cr != nil { - count, err = cr.Count() - } else { - count, err = i.resourceCountMax(module, cr, v) - } - if err != nil { - return nil, fmt.Errorf( - "Error reading %s count: %s", - v.ResourceId(), - err) - } - - return &ast.Variable{Type: ast.TypeInt, Value: count}, nil - } - - // Get the resource out from the state. We know the state exists - // at this point and if there is a state, we expect there to be a - // resource with the given name. 
- var r *ResourceState - if module != nil && len(module.Resources) > 0 { - var ok bool - r, ok = module.Resources[id] - if !ok && v.Multi && v.Index == 0 { - r, ok = module.Resources[v.ResourceId()] - } - if !ok { - r = nil - } - } - if r == nil || r.Primary == nil { - if i.Operation == walkApply || i.Operation == walkPlan { - return nil, fmt.Errorf( - "Resource '%s' not found for variable '%s'", - v.ResourceId(), - v.FullKey()) - } - - // If we have no module in the state yet or count, return empty. - // NOTE(@mitchellh): I actually don't know why this is here. During - // a refactor I kept this here to maintain the same behavior, but - // I'm not sure why it's here. - if module == nil || len(module.Resources) == 0 { - return nil, nil - } - - goto MISSING - } - - if attr, ok := r.Primary.Attributes[v.Field]; ok { - v, err := hil.InterfaceToVariable(attr) - return &v, err - } - - // computed list or map attribute - _, isList = r.Primary.Attributes[v.Field+".#"] - _, isMap = r.Primary.Attributes[v.Field+".%"] - if isList || isMap { - variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) - return &variable, err - } - - // At apply time, we can't do the "maybe has it" check below - // that we need for plans since parent elements might be computed. - // Therefore, it is an error and we're missing the key. - // - // TODO: test by creating a state and configuration that is referencing - // a non-existent variable "foo.bar" where the state only has "foo" - // and verify plan works, but apply doesn't. - if i.Operation == walkApply || i.Operation == walkDestroy { - goto MISSING - } - - // We didn't find the exact field, so let's separate the dots - // and see if anything along the way is a computed set. i.e. if - // we have "foo.0.bar" as the field, check to see if "foo" is - // a computed list. If so, then the whole thing is computed. - if parts := strings.Split(v.Field, "."); len(parts) > 1 { - for i := 1; i < len(parts); i++ { - // Lists and sets make this - key := fmt.Sprintf("%s.#", strings.Join(parts[:i], ".")) - if attr, ok := r.Primary.Attributes[key]; ok { - v, err := hil.InterfaceToVariable(attr) - return &v, err - } - - // Maps make this - key = fmt.Sprintf("%s", strings.Join(parts[:i], ".")) - if attr, ok := r.Primary.Attributes[key]; ok { - v, err := hil.InterfaceToVariable(attr) - return &v, err - } - } - } - -MISSING: - // Validation for missing interpolations should happen at a higher - // semantic level. If we reached this point and don't have variables, - // just return the computed value. - if scope == nil || scope.Resource == nil { - return &unknownVariable, nil - } - - // If the operation is refresh, it isn't an error for a value to - // be unknown. Instead, we return that the value is computed so - // that the graph can continue to refresh other nodes. It doesn't - // matter because the config isn't interpolated anyway. - // - // For a Destroy, we're also fine with computed values, since our goal is - // only to get destroy nodes for existing resources. - // - // For an input walk, computed values are okay to return because we're only - // looking for missing variables to prompt the user for.
- if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { - return &unknownVariable, nil - } - - return nil, fmt.Errorf( - "Resource '%s' does not have attribute '%s' "+ - "for variable '%s'", - id, - v.Field, - v.FullKey()) -} - -func (i *Interpolater) computeResourceMultiVariable( - scope *InterpolationScope, - v *config.ResourceVariable) (*ast.Variable, error) { - i.StateLock.RLock() - defer i.StateLock.RUnlock() - - unknownVariable := unknownVariable() - - // If we're only looking for input, we don't need to expand a - // multi-variable. This prevents us from encountering things that should be - // known but aren't because the state has yet to be refreshed. - if i.Operation == walkInput { - return &unknownVariable, nil - } - - // Get the information about this resource variable, and verify - // that it exists and such. - module, cr, err := i.resourceVariableInfo(scope, v) - if err != nil { - return nil, err - } - - // Get the keys for all the resources that are created for this resource - countMax, err := i.resourceCountMax(module, cr, v) - if err != nil { - return nil, err - } - - // If count is zero, we return an empty list - if countMax == 0 { - return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil - } - - // If we have no module in the state yet or count, return unknown - if module == nil || len(module.Resources) == 0 { - return &unknownVariable, nil - } - - var values []interface{} - for idx := 0; idx < countMax; idx++ { - id := fmt.Sprintf("%s.%d", v.ResourceId(), idx) - - // ID doesn't have a trailing index. We try both here, but if a value - // without a trailing index is found we prefer that. This choice - // is for legacy reasons: older versions of TF preferred it. - if id == v.ResourceId()+".0" { - potential := v.ResourceId() - if _, ok := module.Resources[potential]; ok { - id = potential - } - } - - r, ok := module.Resources[id] - if !ok { - continue - } - - if r.Primary == nil { - continue - } - - if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { - values = append(values, singleAttr) - continue - } - - // computed list or map attribute - _, isList := r.Primary.Attributes[v.Field+".#"] - _, isMap := r.Primary.Attributes[v.Field+".%"] - if !(isList || isMap) { - continue - } - multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) - if err != nil { - return nil, err - } - - values = append(values, multiAttr) - } - - if len(values) == 0 { - // If the operation is refresh, it isn't an error for a value to - // be unknown. Instead, we return that the value is computed so - // that the graph can continue to refresh other nodes. It doesn't - // matter because the config isn't interpolated anyways. - // - // For a Destroy, we're also fine with computed values, since our goal is - // only to get destroy nodes for existing resources. - // - // For an input walk, computed values are okay to return because we're only - // looking for missing variables to prompt the user for. 
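An aside on the ".#" and ".%" markers used throughout these functions: legacy flatmap state encodes lists and maps as a flat map of strings, with a count entry marking each collection. A hand-rolled illustration (not the real flatmap package):

package main

import "fmt"

func main() {
	// A list attribute "subnets" with two elements, as the legacy
	// flatmap encoding stores it: ".#" holds the count, ".<idx>" the items.
	attrs := map[string]string{
		"subnets.#": "2",
		"subnets.0": "subnet-aaa",
		"subnets.1": "subnet-bbb",
		// A map attribute uses ".%" for its key count instead.
		"tags.%":    "1",
		"tags.Name": "web",
	}
	fmt.Println(attrs["subnets.#"], attrs["tags.%"]) // 2 1
}

Setting the count entry itself to the unknown marker is how "this whole collection is unknown" is expressed, which is exactly the case the two UnknownVariableValue checks above handle.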
- if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { - return &unknownVariable, nil - } - - return nil, fmt.Errorf( - "Resource '%s' does not have attribute '%s' "+ - "for variable '%s'", - v.ResourceId(), - v.Field, - v.FullKey()) - } - - variable, err := hil.InterfaceToVariable(values) - return &variable, err -} - -func (i *Interpolater) interpolateComplexTypeAttribute( - resourceID string, - attributes map[string]string) (ast.Variable, error) { - - // We can now distinguish between lists and maps in state by the count field: - // - lists (and by extension, sets) use the traditional .# notation - // - maps use the newer .% notation - // Consequently here we can decide how to deal with the keys appropriately - // based on whether the type is a map of list. - if lengthAttr, isList := attributes[resourceID+".#"]; isList { - log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)", - resourceID, lengthAttr) - - // In Terraform's internal dotted representation of list-like attributes, the - // ".#" count field is marked as unknown to indicate "this whole list is - // unknown". We must honor that meaning here so computed references can be - // treated properly during the plan phase. - if lengthAttr == config.UnknownVariableValue { - return unknownVariable(), nil - } - - expanded := flatmap.Expand(attributes, resourceID) - return hil.InterfaceToVariable(expanded) - } - - if lengthAttr, isMap := attributes[resourceID+".%"]; isMap { - log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)", - resourceID, lengthAttr) - - // In Terraform's internal dotted representation of map attributes, the - // ".%" count field is marked as unknown to indicate "this whole list is - // unknown". We must honor that meaning here so computed references can be - // treated properly during the plan phase. - if lengthAttr == config.UnknownVariableValue { - return unknownVariable(), nil - } - - expanded := flatmap.Expand(attributes, resourceID) - return hil.InterfaceToVariable(expanded) - } - - return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID) -} - -func (i *Interpolater) resourceVariableInfo( - scope *InterpolationScope, - v *config.ResourceVariable) (*ModuleState, *config.Resource, error) { - // Get the module tree that contains our current path. This is - // either the current module (path is empty) or a child. - modTree := i.Module - if len(scope.Path) > 1 { - modTree = i.Module.Child(scope.Path[1:]) - } - - // Get the resource from the configuration so we can verify - // that the resource is in the configuration and so we can access - // the configuration if we need to. - var cr *config.Resource - for _, r := range modTree.Config().Resources { - if r.Id() == v.ResourceId() { - cr = r - break - } - } - - // Get the relevant module - module := i.State.ModuleByPath(scope.Path) - return module, cr, nil -} - -func (i *Interpolater) resourceCountMax( - ms *ModuleState, - cr *config.Resource, - v *config.ResourceVariable) (int, error) { - id := v.ResourceId() - - // If we're NOT applying, then we assume we can read the count - // from the state. Plan and so on may not have any state yet so - // we do a full interpolation. 
- if i.Operation != walkApply { - if cr == nil { - return 0, nil - } - - count, err := cr.Count() - if err != nil { - return 0, err - } - - return count, nil - } - - // If we have no module state in the apply walk, that suggests we've hit - // a rather awkward edge-case: the resource this variable refers to - // has count = 0 and is the only resource processed so far on this walk, - // and so we've ended up not creating any resource states yet. We don't - // create a module state until the first resource is written into it, - // so the module state doesn't exist when we get here. - // - // In this case we act as we would if we had been passed a module - // with an empty resource state map. - if ms == nil { - return 0, nil - } - - // We need to determine the list of resource keys to get values from. - // This needs to be sorted so the order is deterministic. We used to - // use "cr.Count()" but that doesn't work if the count is interpolated - // and we can't guarantee that so we instead depend on the state. - max := -1 - for k, _ := range ms.Resources { - // Get the index number for this resource - index := "" - if k == id { - // If the key is the id, then its just 0 (no explicit index) - index = "0" - } else if strings.HasPrefix(k, id+".") { - // Grab the index number out of the state - index = k[len(id+"."):] - if idx := strings.IndexRune(index, '.'); idx >= 0 { - index = index[:idx] - } - } - - // If there was no index then this resource didn't match - // the one we're looking for, exit. - if index == "" { - continue - } - - // Turn the index into an int - raw, err := strconv.ParseInt(index, 0, 0) - if err != nil { - return 0, fmt.Errorf( - "%s: error parsing index %q as int: %s", - id, index, err) - } - - // Keep track of this index if its the max - if new := int(raw); new > max { - max = new - } - } - - // If we never found any matching resources in the state, we - // have zero. - if max == -1 { - return 0, nil - } - - // The result value is "max+1" because we're returning the - // max COUNT, not the max INDEX, and we zero-index. - return max + 1, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go deleted file mode 100644 index b9f44a0..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go +++ /dev/null @@ -1,156 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/moduledeps" - "github.com/hashicorp/terraform/plugin/discovery" -) - -// ModuleTreeDependencies returns the dependencies of the tree of modules -// described by the given configuration tree and state. -// -// Both configuration and state are required because there can be resources -// implied by instances in the state that no longer exist in config. -// -// This function will panic if any invalid version constraint strings are -// present in the configuration. This is guaranteed not to happen for any -// configuration that has passed a call to Config.Validate(). -func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { - - // First we walk the configuration tree to build the overall structure - // and capture the explicit/implicit/inherited provider dependencies. 
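Before leaving resourceCountMax above: a compact, self-contained sketch of its max-index scan over state keys, with an invented helper that follows the same logic:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// maxCount derives a resource's effective count from state keys
// ("id", "id.0", "id.1", ...) by tracking the highest index seen,
// then adding one, mirroring the loop above.
func maxCount(keys []string, id string) (int, error) {
	max := -1
	for _, k := range keys {
		index := ""
		if k == id {
			// A bare key means index 0 with no explicit suffix.
			index = "0"
		} else if strings.HasPrefix(k, id+".") {
			index = strings.TrimPrefix(k, id+".")
			if dot := strings.IndexRune(index, '.'); dot >= 0 {
				index = index[:dot]
			}
		}
		if index == "" {
			continue // key belongs to some other resource
		}
		n, err := strconv.Atoi(index)
		if err != nil {
			return 0, fmt.Errorf("%s: bad index %q: %s", id, index, err)
		}
		if n > max {
			max = n
		}
	}
	return max + 1, nil
}

func main() {
	n, _ := maxCount([]string{"aws_instance.a.0", "aws_instance.a.2"}, "aws_instance.a")
	fmt.Println(n) // 3
}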
- deps := moduleTreeConfigDependencies(root, nil) - - // Next we walk over the resources in the state to catch any additional - // dependencies created by existing resources that are no longer in config. - // Most things we find in state will already be present in 'deps', but - // we're interested in the rare thing that isn't. - moduleTreeMergeStateDependencies(deps, state) - - return deps -} - -func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module { - if root == nil { - // If no config is provided, we'll make a synthetic root. - // This isn't necessarily correct if we're called with a nil that - // *isn't* at the root, but in practice that can never happen. - return &moduledeps.Module{ - Name: "root", - } - } - - ret := &moduledeps.Module{ - Name: root.Name(), - } - - cfg := root.Config() - providerConfigs := cfg.ProviderConfigsByFullName() - - // Provider dependencies - { - providers := make(moduledeps.Providers, len(providerConfigs)) - - // Any providerConfigs elements are *explicit* provider dependencies, - // which is the only situation where the user might provide an actual - // version constraint. We'll take care of these first. - for fullName, pCfg := range providerConfigs { - inst := moduledeps.ProviderInstance(fullName) - versionSet := discovery.AllVersions - if pCfg.Version != "" { - versionSet = discovery.ConstraintStr(pCfg.Version).MustParse() - } - providers[inst] = moduledeps.ProviderDependency{ - Constraints: versionSet, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. 
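A condensed sketch of the explicit-versus-implicit rule just described, using invented stand-in types:

package main

import "fmt"

type depReason string

const (
	reasonExplicit depReason = "explicit"
	reasonImplicit depReason = "implicit"
)

type providerDep struct {
	constraint string // "" means any version
	reason     depReason
}

func main() {
	deps := map[string]providerDep{}

	// Explicit provider blocks may carry a version constraint.
	deps["aws"] = providerDep{constraint: ">= 1.0", reason: reasonExplicit}

	// Resources imply a dependency on their provider, but never
	// overwrite an explicit entry for the same provider.
	for _, provider := range []string{"aws", "google"} {
		if _, exists := deps[provider]; exists {
			continue
		}
		deps[provider] = providerDep{reason: reasonImplicit}
	}

	fmt.Println(deps["aws"].reason, deps["google"].reason) // explicit implicit
}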
- for _, rc := range cfg.Resources { - fullName := rc.ProviderFullName() - inst := moduledeps.ProviderInstance(fullName) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[fullName]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - - ret.Providers = providers - } - - childInherit := make(map[string]*config.ProviderConfig) - for k, v := range inheritProviders { - childInherit[k] = v - } - for k, v := range providerConfigs { - childInherit[k] = v - } - for _, c := range root.Children() { - ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit)) - } - - return ret -} - -func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { - if state == nil { - return - } - - findModule := func(path []string) *moduledeps.Module { - module := root - for _, name := range path[1:] { // skip initial "root" - var next *moduledeps.Module - for _, cm := range module.Children { - if cm.Name == name { - next = cm - break - } - } - - if next == nil { - // If we didn't find a next node, we'll need to make one - next = &moduledeps.Module{ - Name: name, - } - module.Children = append(module.Children, next) - } - - module = next - } - return module - } - - for _, ms := range state.Modules { - module := findModule(ms.Path) - - for _, is := range ms.Resources { - fullName := config.ResourceProviderFullName(is.Type, is.Provider) - inst := moduledeps.ProviderInstance(fullName) - if _, exists := module.Providers[inst]; !exists { - if module.Providers == nil { - module.Providers = make(moduledeps.Providers) - } - module.Providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: moduledeps.ProviderDependencyFromState, - } - } - } - } - -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go deleted file mode 100644 index bd32c79..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go +++ /dev/null @@ -1,14 +0,0 @@ -package terraform - -// NodeCountBoundary fixes any "count boundaries" in the state: resources -// that are named "foo.0" when they should be named "foo" -type NodeCountBoundary struct{} - -func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (count boundary fixup)" -} - -// GraphNodeEvalable -func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go deleted file mode 100644 index e32cea8..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -// NodeDestroyableDataResource represents a data resource that is "destroyable": -// its state entry is ready to be removed. -type NodeDestroyableDataResource struct { - *NodeAbstractResource -} - -// GraphNodeEvalable -func (n *NodeDestroyableDataResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Just destroy it.
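The file node_data_refresh.go, a little further below, turns one counted data resource into per-index instance nodes via DynamicExpand; as a minimal sketch of that expansion step (names invented):

package main

import "fmt"

// expand mirrors DynamicExpand's core move: turn one counted resource
// address into count concrete per-instance node names.
func expand(addr string, count int) []string {
	nodes := make([]string, 0, count)
	for i := 0; i < count; i++ {
		nodes = append(nodes, fmt.Sprintf("%s[%d]", addr, i))
	}
	return nodes
}

func main() {
	fmt.Println(expand("data.aws_ami.web", 3))
	// [data.aws_ami.web[0] data.aws_ami.web[1] data.aws_ami.web[2]]
}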
- var state *InstanceState - return &EvalWriteState{ - Name: stateId, - State: &state, // state is nil here - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go deleted file mode 100644 index 45129b3..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ /dev/null @@ -1,218 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// NodeRefreshableDataResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. -type NodeRefreshableDataResource struct { - *NodeAbstractCountResource -} - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: a, - } - } - - // We also need a destroyable resource for orphans that are a result of a - // scaled-in count. - concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { - // Add the config since we don't do that via transforms - a.Config = n.Config - - return &NodeDestroyableDataResource{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans. As these are orphaned refresh nodes, we add them - // directly as NodeDestroyableDataResource. - &OrphanResourceCountTransformer{ - Concrete: concreteResourceDestroyable, - Count: count, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableDataResource", - } - - return b.Build(ctx.Path()) -} - -// NodeRefreshableDataResourceInstance represents a _single_ resource instance -// that is refreshable. -type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResource -} - -// GraphNodeEvalable -func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. 
More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } - - // Get the state if we have it, if not we build it - rs := n.ResourceState - if rs == nil { - rs = &ResourceState{} - } - - // If the config isn't empty we update the state - if n.Config != nil { - rs = &ResourceState{ - Type: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: n.StateReferences(), - } - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var config *ResourceConfig - var diff *InstanceDiff - var provider ResourceProvider - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - // Always destroy the existing state first, since we must - // make sure that values from a previous read will not - // get interpolated if we end up needing to defer our - // loading until apply time. - &EvalWriteState{ - Name: stateId, - ResourceType: rs.Type, - Provider: rs.Provider, - Dependencies: rs.Dependencies, - State: &state, // state is nil here - }, - - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - // The rest of this pass can proceed only if there are no - // computed values in our config. - // (If there are, we'll deal with this during the plan and - // apply phases.) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 { - return true, EvalEarlyExitError{} - } - - // If the config explicitly has a depends_on for this - // data source, assume the intention is to prevent - // refreshing ahead of that dependency. - if len(n.Config.DependsOn) > 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - - Then: EvalNoop{}, - }, - - // The remainder of this pass is the same as running - // a "plan" pass immediately followed by an "apply" pass, - // populating the state early so it'll be available to - // provider configurations that need this data during - // refresh/plan. - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: rs.Type, - Provider: rs.Provider, - Dependencies: rs.Dependencies, - State: &state, - }, - - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go deleted file mode 100644 index 319df1e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go +++ /dev/null @@ -1,29 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// NodeDestroyableModule represents a module destruction. 
-type NodeDestroyableModuleVariable struct { - PathValue []string -} - -func (n *NodeDestroyableModuleVariable) Name() string { - result := "plan-destroy" - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeDestroyableModuleVariable) Path() []string { - return n.PathValue -} - -// GraphNodeEvalable -func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode { - return &EvalDiffDestroyModule{Path: n.PathValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go deleted file mode 100644 index 63b84a9..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ /dev/null @@ -1,140 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// NodeApplyableModuleVariable represents a module variable input during -// the apply step. -type NodeApplyableModuleVariable struct { - PathValue []string - Config *config.Variable // Config is the var in the config - Value *config.RawConfig // Value is the value that is set - - Module *module.Tree // Antiquated, want to remove - - // Input is set if this graph was created for the Input operation. - Input bool -} - -func (n *NodeApplyableModuleVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() []string { - // We execute in the parent scope (above our own module) so that - // we can access the proper interpolations. - if len(n.PathValue) > 2 { - return n.PathValue[:len(n.PathValue)-1] - } - - return rootModulePath -} - -// RemovableIfNotTargeted -func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferenceGlobal -func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { - // We have to create fully qualified references because we cross - // boundaries here: our ReferenceableName is in one path and our - // References are from another path. - return true -} - -// GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableName() []string { - return []string{n.Name()} -} - -// GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []string { - // If we have no value set, we depend on nothing - if n.Value == nil { - return nil - } - - // Can't depend on anything if we're in the root - if len(n.PathValue) < 2 { - return nil - } - - // Otherwise, we depend on anything that is in our value, but - // specifically in the namespace of the parent path. - // Create the prefix based on the path - var prefix string - if p := n.Path(); len(p) > 0 { - prefix = modulePrefixStr(p) - } - - result := ReferencesFromConfig(n.Value) - return modulePrefixList(result, prefix) -} - -// GraphNodeEvalable -func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { - // If we have no value, do nothing - if n.Value == nil { - return &EvalNoop{} - } - - // Otherwise, interpolate the value of this variable and set it - // within the variables mapping. 
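The EvalTree bodies in these files share a pattern worth noting: steps communicate through pointers declared up front, and EvalSequence simply runs the steps in order. A minimal sketch of that pattern with invented node types:

package main

import "fmt"

// evalNode mirrors the EvalSequence idea: each step reads and writes
// shared locations, and the sequence just runs them in order.
type evalNode interface{ eval() error }

type evalSequence struct{ nodes []evalNode }

func (s evalSequence) eval() error {
	for _, n := range s.nodes {
		if err := n.eval(); err != nil {
			return err
		}
	}
	return nil
}

// setVar writes into a shared location; printVar reads it back.
type setVar struct {
	out *string
	val string
}

func (n setVar) eval() error { *n.out = n.val; return nil }

type printVar struct{ in *string }

func (n printVar) eval() error { fmt.Println(*n.in); return nil }

func main() {
	var v string
	seq := evalSequence{nodes: []evalNode{setVar{&v, "interpolated"}, printVar{&v}}}
	_ = seq.eval() // prints "interpolated"
}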
- var config *ResourceConfig - variables := make(map[string]interface{}) - - var interpolate EvalNode - - if n.Input { - interpolate = &EvalTryInterpolate{ - Config: n.Value, - Output: &config, - } - } else { - interpolate = &EvalInterpolate{ - Config: n.Value, - Output: &config, - } - } - - return &EvalSequence{ - Nodes: []EvalNode{ - interpolate, - - &EvalVariableBlock{ - Config: &config, - VariableValues: variables, - }, - - &EvalCoerceMapVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, - }, - - &EvalTypeCheckVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, - }, - - &EvalSetVariables{ - Module: &n.PathValue[len(n.PathValue)-1], - Variables: variables, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go deleted file mode 100644 index 9017a63..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. -type NodeApplyableOutput struct { - PathValue []string - Config *config.Output // Config is the output in the config -} - -func (n *NodeApplyableOutput) Name() string { - result := fmt.Sprintf("output.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeApplyableOutput) Path() []string { - return n.PathValue -} - -// RemovableIfNotTargeted -func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeTargetDownstream -func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - // If any of the direct dependencies of an output are targeted then - // the output must always be targeted as well, so its value will always - // be up-to-date at the completion of an apply walk. - return true -} - -// GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableName() []string { - name := fmt.Sprintf("output.%s", n.Config.Name) - return []string{name} -} - -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []string { - var result []string - result = append(result, n.Config.DependsOn...) - result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 
- for _, v := range result { - split := strings.Split(v, "/") - for i, s := range split { - split[i] = s + ".destroy" - } - - result = append(result, strings.Join(split, "/")) - } - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkInput, walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteOutput{ - Name: n.Config.Name, - Sensitive: n.Config.Sensitive, - Value: n.Config.RawConfig, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go deleted file mode 100644 index 636a15d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ /dev/null @@ -1,35 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// NodeOutputOrphan represents an output that is an orphan. -type NodeOutputOrphan struct { - OutputName string - PathValue []string -} - -func (n *NodeOutputOrphan) Name() string { - result := fmt.Sprintf("output.%s (orphan)", n.OutputName) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeOutputOrphan) Path() []string { - return n.PathValue -} - -// GraphNodeEvalable -func (n *NodeOutputOrphan) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalDeleteOutput{ - Name: n.OutputName, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go deleted file mode 100644 index 8e2c176..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -// NodeApplyableProvider represents a provider during an apply. -type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n.NameValue, n.ProviderConfig()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go deleted file mode 100644 index 6cc8365..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. -type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeAbstractProvider struct { - NameValue string - PathValue []string - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- - Config *config.ProviderConfig -} - -func (n *NodeAbstractProvider) Name() string { - result := fmt.Sprintf("provider.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeAbstractProvider) Path() []string { - return n.PathValue -} - -// RemovableIfNotTargeted -func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []string { - if n.Config == nil { - return nil - } - - return ReferencesFromConfig(n.Config.RawConfig) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderName() string { - return n.NameValue -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig { - if n.Config == nil { - return nil - } - - return n.Config.RawConfig -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { - n.Config = c -} - -// GraphNodeDotter impl. -func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go deleted file mode 100644 index 25e7e62..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// NodeDisabledProvider represents a provider that is disabled. A disabled -// provider does nothing. It exists to properly set inheritance information -// for child providers. -type NodeDisabledProvider struct { - *NodeAbstractProvider -} - -func (n *NodeDisabledProvider) Name() string { - return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) -} - -// GraphNodeEvalable -func (n *NodeDisabledProvider) EvalTree() EvalNode { - var resourceConfig *ResourceConfig - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.ProviderConfig(), - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go deleted file mode 100644 index bb117c1..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// NodeProvisioner represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeProvisioner struct { - NameValue string - PathValue []string - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- - Config *config.ProviderConfig -} - -func (n *NodeProvisioner) Name() string { - result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeProvisioner) Path() []string { - return n.PathValue -} - -// GraphNodeProvisioner -func (n *NodeProvisioner) ProvisionerName() string { - return n.NameValue -} - -// GraphNodeEvalable impl. -func (n *NodeProvisioner) EvalTree() EvalNode { - return &EvalInitProvisioner{Name: n.NameValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go deleted file mode 100644 index 50bb707..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ /dev/null @@ -1,240 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// ConcreteResourceNodeFunc is a callback type used to convert an -// abstract resource to a concrete one of some type. -type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex - -// GraphNodeResource is implemented by any nodes that represent a resource. -// The type of operation cannot be assumed, only that this node represents -// the given resource. -type GraphNodeResource interface { - ResourceAddr() *ResourceAddress -} - -// NodeAbstractResource represents a resource that has no associated -// operations. It registers all the interfaces for a resource that common -// across multiple operation types. -type NodeAbstractResource struct { - Addr *ResourceAddress // Addr is the address for this resource - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - Config *config.Resource // Config is the resource in the config - ResourceState *ResourceState // ResourceState is the ResourceState for this - - Targets []ResourceAddress // Set from GraphNodeTargetable -} - -func (n *NodeAbstractResource) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeAbstractResource) Path() []string { - return n.Addr.Path -} - -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableName() []string { - // We always are referenceable as "type.name" as long as - // we have a config or address. Determine what that value is. - var id string - if n.Config != nil { - id = n.Config.Id() - } else if n.Addr != nil { - addrCopy := n.Addr.Copy() - addrCopy.Path = nil // ReferenceTransformer handles paths - addrCopy.Index = -1 // We handle indexes below - id = addrCopy.String() - } else { - // No way to determine our type.name, just return - return nil - } - - var result []string - - // Always include our own ID. This is primarily for backwards - // compatibility with states that didn't yet support the more - // specific dep string. 
- result = append(result, id) - - // We represent all multi-access - result = append(result, fmt.Sprintf("%s.*", id)) - - // We represent either a specific number, or all numbers - suffix := "N" - if n.Addr != nil { - idx := n.Addr.Index - if idx == -1 { - idx = 0 - } - - suffix = fmt.Sprintf("%d", idx) - } - result = append(result, fmt.Sprintf("%s.%s", id, suffix)) - - return result -} - -// GraphNodeReferencer -func (n *NodeAbstractResource) References() []string { - // If we have a config, that is our source of truth - if c := n.Config; c != nil { - // Grab all the references - var result []string - result = append(result, c.DependsOn...) - result = append(result, ReferencesFromConfig(c.RawCount)...) - result = append(result, ReferencesFromConfig(c.RawConfig)...) - for _, p := range c.Provisioners { - if p.When == config.ProvisionerWhenCreate { - result = append(result, ReferencesFromConfig(p.ConnInfo)...) - result = append(result, ReferencesFromConfig(p.RawConfig)...) - } - } - - return uniqueStrings(result) - } - - // If we have state, that is our next source - if s := n.ResourceState; s != nil { - return s.Dependencies - } - - return nil -} - -// StateReferences returns the dependencies to put into the state for -// this resource. -func (n *NodeAbstractResource) StateReferences() []string { - self := n.ReferenceableName() - - // Determine what our "prefix" is for checking for references to - // ourself. - addrCopy := n.Addr.Copy() - addrCopy.Index = -1 - selfPrefix := addrCopy.String() + "." - - depsRaw := n.References() - deps := make([]string, 0, len(depsRaw)) - for _, d := range depsRaw { - // Ignore any variable dependencies - if strings.HasPrefix(d, "var.") { - continue - } - - // If this has a backup ref, ignore those for now. The old state - // file never contained those and I'd rather store the rich types we - // add in the future. - if idx := strings.IndexRune(d, '/'); idx != -1 { - d = d[:idx] - } - - // If we're referencing ourself, then ignore it - found := false - for _, s := range self { - if d == s { - found = true - } - } - if found { - continue - } - - // If this is a reference to ourself and a specific index, we keep - // it. For example, if this resource is "foo.bar" and the reference - // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. - if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { - d = d[:len(d)-2] - } - - // This is sad. The dependencies are currently in the format of - // "module.foo.bar" (the full field). This strips the field off. - if strings.HasPrefix(d, "module.") { - parts := strings.SplitN(d, ".", 3) - d = strings.Join(parts[0:2], ".") - } - - deps = append(deps, d) - } - - return deps -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() []string { - // If we have a config we prefer that above all else - if n.Config != nil { - return []string{resourceProvider(n.Config.Type, n.Config.Provider)} - } - - // If we have state, then we will use the provider from there - if n.ResourceState != nil && n.ResourceState.Provider != "" { - return []string{n.ResourceState.Provider} - } - - // Use our type - return []string{resourceProvider(n.Addr.Type, "")} -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. 
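Looking back at StateReferences above, a condensed sketch of its dependency normalization (hypothetical helper; the real code additionally keeps a ".0" suffix on self-references):

package main

import (
	"fmt"
	"strings"
)

// normalizeDep applies the same trims StateReferences does: drop
// "var." refs, cut anything after "/", strip a trailing ".0" index,
// and collapse "module.a.b" down to "module.a".
func normalizeDep(d string) (string, bool) {
	if strings.HasPrefix(d, "var.") {
		return "", false
	}
	if idx := strings.IndexRune(d, '/'); idx != -1 {
		d = d[:idx]
	}
	if strings.HasSuffix(d, ".0") {
		d = d[:len(d)-2]
	}
	if strings.HasPrefix(d, "module.") {
		parts := strings.SplitN(d, ".", 3)
		d = strings.Join(parts[:2], ".")
	}
	return d, true
}

func main() {
	for _, d := range []string{"var.region", "aws_instance.web.0", "module.net.vpc_id"} {
		if out, ok := normalizeDep(d); ok {
			fmt.Println(out)
		}
	}
	// aws_instance.web
	// module.net
}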
- result := make([]string, len(n.Config.Provisioners)) - for i, p := range n.Config.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeResource, GraphNodeAttachResourceState -func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { - return n.Addr -} - -// GraphNodeAddressable, TODO: remove, used by target, should unify -func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { - return n.ResourceAddr() -} - -// GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { - n.Targets = targets -} - -// GraphNodeAttachResourceState -func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { - n.ResourceState = s -} - -// GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { - n.Config = c -} - -// GraphNodeDotter impl. -func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "box", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go deleted file mode 100644 index 573570d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go +++ /dev/null @@ -1,50 +0,0 @@ -package terraform - -// NodeAbstractCountResource should be embedded instead of NodeAbstractResource -// if the resource has a `count` value that needs to be expanded. -// -// The embedder should implement `DynamicExpand` to process the count. -type NodeAbstractCountResource struct { - *NodeAbstractResource - - // Validate, if true, will perform the validation for the count. - // This should only be turned on for the "validate" operation. - Validate bool -} - -// GraphNodeEvalable -func (n *NodeAbstractCountResource) EvalTree() EvalNode { - // We only check if the count is computed if we're not validating. - // If we're validating we allow computed counts since they just turn - // into more computed values. - var evalCountCheckComputed EvalNode - if !n.Validate { - evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - // The EvalTree for a plannable resource primarily involves - // interpolating the count since it can contain variables - // we only just received access to. - // - // With the interpolated count, we can then DynamicExpand - // into the proper number of instances. - &EvalInterpolate{Config: n.Config.RawCount}, - - // Check if the count is computed - evalCountCheckComputed, - - // If validation is enabled, perform the validation - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return n.Validate, nil - }, - - Then: &EvalValidateCount{Resource: n.Config}, - }, - - &EvalCountFixZeroOneBoundary{Resource: n.Config}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go deleted file mode 100644 index 3599782..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go +++ /dev/null @@ -1,357 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// NodeApplyableResource represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
-type NodeApplyableResource struct { - *NodeAbstractResource -} - -// GraphNodeCreator -func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { - return n.NodeAbstractResource.Addr -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeApplyableResource) References() []string { - result := n.NodeAbstractResource.References() - - // The "apply" side of a resource generally also depends on the - // destruction of its dependencies as well. For example, if a LB - // references a set of VMs with ${vm.foo.*.id}, then we must wait for - // the destruction so we get the newly updated list of VMs. - // - // The exception here is CBD. When CBD is set, we don't do this since - // it would create a cycle. By not creating a cycle, we require two - // applies since the first apply the creation step will use the OLD - // values (pre-destroy) and the second step will update. - // - // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x). - // We mimic that behavior here now and can improve upon it in the future. - // - // This behavior is tested in graph_build_apply_test.go to test ordering. - cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy - if !cbd { - // The "apply" side of a resource always depends on the destruction - // of all its dependencies in addition to the creation. - for _, v := range result { - result = append(result, v+".destroy") - } - } - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Determine the dependencies for the state. - stateDeps := n.StateReferences() - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResource) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Stop here if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diff == nil { - return true, EvalEarlyExitError{} - } - - if diff.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - // We need to re-interpolate the config here, rather than - // just using the diff's values directly, because we've - // potentially learned more variable values during the - // apply pass that weren't known when the diff was produced. 
- &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - // Make a new diff with our newly-interpolated config. - &EvalReadDataDiff{ - Info: info, - Config: &config, - Previous: &diff, - Provider: &provider, - Output: &diff, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Name: stateId, - Diff: nil, - }, - - &EvalUpdateStateHook{}, - }, - } -} - -func (n *NodeApplyableResource) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var diff, diffApply *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig - var err error - var createNew bool - var createBeforeDestroyEnabled bool - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, - }, - - // We don't want to do any destroys - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - - if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - diffApply.SetDestroy(false) - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = diffApply.GetDestroy() || diffApply.RequiresNew() - } - - createBeforeDestroyEnabled = - n.Config.Lifecycle.CreateBeforeDestroy && - destroy - - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Name: stateId, - }, - }, - - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. 
- &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, - &EvalDiff{ - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - Diff: &diffApply, - State: &state, - OutputDiff: &diffApply, - }, - - // Get the saved diff - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Compare the diffs - &EvalCompareDiff{ - Info: info, - One: &diff, - Two: &diffApply, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diffApply, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - &EvalApplyProvisioners{ - Info: info, - State: &state, - Resource: n.Config, - InterpResource: resource, - CreateNew: &createNew, - Error: &err, - When: config.ProvisionerWhenCreate, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalUndeposeState{ - Name: stateId, - State: &state, - }, - Else: &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! - &EvalWriteDiff{ - Name: stateId, - Diff: nil, - }, - - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go deleted file mode 100644 index c2efd2c..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ /dev/null @@ -1,288 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// NodeDestroyResource represents a resource that is to be destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource -} - -func (n *NodeDestroyResource) Name() string { - return n.NodeAbstractResource.Name() + " (destroy)" -} - -// GraphNodeDestroyer -func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { - return n.Addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResource) CreateBeforeDestroy() bool { - // If we have no config, we just assume no - if n.Config == nil { - return false - } - - return n.Config.Lifecycle.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { - // If we have no config, do nothing since it won't affect the - // create step anyways. - if n.Config == nil { - return nil - } - - // Set CBD to true - n.Config.Lifecycle.CreateBeforeDestroy = true - - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableName() []string { - // We modify our referenceable name to have the suffix of ".destroy" - // since depending on the creation side doesn't necessarilly mean - // depending on destruction. 
- suffix := ".destroy" - - // If we're CBD, we also append "-cbd". This is because CBD will setup - // its own edges (in CBDEdgeTransformer). Depending on the "destroy" - // side generally doesn't mean depending on CBD as well. See GH-11349 - if n.CreateBeforeDestroy() { - suffix += "-cbd" - } - - result := n.NodeAbstractResource.ReferenceableName() - for i, v := range result { - result[i] = v + suffix - } - - return result -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []string { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil { - var result []string - for _, p := range c.Provisioners { - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - if p.When == config.ProvisionerWhenDestroy { - result = append(result, ReferencesFromConfig(p.ConnInfo)...) - result = append(result, ReferencesFromConfig(p.RawConfig)...) - } - } - - return result - } - - return nil -} - -// GraphNodeDynamicExpandable -func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // If we have no config we do nothing - if n.Addr == nil { - return nil, nil - } - - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Start creating the steps - steps := make([]GraphTransformer, 0, 5) - - // We want deposed resources in the state to be destroyed - steps = append(steps, &DeposedTransformer{ - State: state, - View: n.Addr.stateId(), - }) - - // Target - steps = append(steps, &TargetsTransformer{ - ParsedTargets: n.Targets, - }) - - // Always end with the root being added - steps = append(steps, &RootTransformer{}) - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Name: "NodeResourceDestroy", - } - return b.Build(ctx.Path()) -} - -// GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // stateId is the ID to put into the state - stateId := n.Addr.stateId() - - // Build the instance info. 
More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: n.Addr.Type, - uniqueExtra: "destroy", - } - - // Build the resource for eval - addr := n.Addr - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Get our state - rs := n.ResourceState - if rs == nil { - rs = &ResourceState{} - } - - var diffApply *InstanceDiff - var provider ResourceProvider - var state *InstanceState - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, - }, - - // Filter the diff so we only get the destroy - &EvalFilterDiff{ - Diff: &diffApply, - Output: &diffApply, - Destroy: true, - }, - - // If we're not destroying, then compare diffs - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply != nil && diffApply.GetDestroy() { - return true, nil - } - - return true, EvalEarlyExitError{} - }, - Then: EvalNoop{}, - }, - - // Load the instance info so we have the module path set - &EvalInstanceInfo{Info: info}, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - &EvalRequireState{ - State: &state, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diffApply, - }, - - // Run destroy provisioners if not tainted - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Tainted { - return false, nil - } - - return true, nil - }, - - Then: &EvalApplyProvisioners{ - Info: info, - State: &state, - Resource: n.Config, - InterpResource: resource, - Error: &err, - When: config.ProvisionerWhenDestroy, - }, - }, - - // If we have a provisioning error, then we just call - // the post-apply hook now. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return err != nil, nil - }, - - Then: &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - }, - - // Make sure we handle data sources properly. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if n.Addr == nil { - return false, fmt.Errorf("nil address") - } - - if n.Addr.Mode == config.DataResourceMode { - return true, nil - } - - return false, nil - }, - - Then: &EvalReadDataApply{ - Info: info, - Diff: &diffApply, - Provider: &provider, - Output: &state, - }, - Else: &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - }, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.Addr.Type, - Provider: rs.Provider, - Dependencies: rs.Dependencies, - State: &state, - }, - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go deleted file mode 100644 index 52bbf88..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ /dev/null @@ -1,83 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// NodePlannableResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. 
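The destroy eval sequence above first narrows the saved diff to its destroy portion and exits early when nothing is left. A toy version of that filter, with a simplified struct standing in for InstanceDiff:

package main

import "fmt"

// toyDiff is a simplified stand-in for InstanceDiff.
type toyDiff struct {
	Destroy bool
	Attrs   map[string]string
}

// filterDestroy keeps only the destroy intent, mirroring
// EvalFilterDiff with Destroy: true; a nil result means the walk can
// stop early, like the EvalEarlyExitError above.
func filterDestroy(d *toyDiff) *toyDiff {
	if d == nil || !d.Destroy {
		return nil // nothing to destroy
	}
	return &toyDiff{Destroy: true} // attribute changes are dropped
}

func main() {
	fmt.Println(filterDestroy(&toyDiff{Attrs: map[string]string{"ami": "x"}})) // <nil>
	fmt.Println(filterDestroy(&toyDiff{Destroy: true}))                        // &{true map[]}
}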
-type NodePlannableResource struct { - *NodeAbstractCountResource -} - -// GraphNodeDynamicExpandable -func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodePlannableResourceInstance{ - NodeAbstractResource: a, - } - } - - // The concrete resource factory we'll use for oprhans - concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans - &OrphanResourceCountTransformer{ - Concrete: concreteResourceOrphan, - Count: count, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodePlannableResource", - } - return b.Build(ctx.Path()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go deleted file mode 100644 index 9b02362..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,53 +0,0 @@ -package terraform - -// NodePlanDestroyableResource represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodePlanDestroyableResource struct { - *NodeAbstractResource -} - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { - return n.Addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
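Count expansion plus orphan detection can be pictured without the graph machinery: every index below count becomes a plannable instance, and any state entry at or beyond count is an orphan left behind by a scale-in. An illustrative sketch of the split that ResourceCountTransformer and OrphanResourceCountTransformer perform above:

package main

import "fmt"

// expandCount returns the instance ids to plan and, given the indices
// currently in state, the orphans to destroy.
func expandCount(id string, count int, stateIdx []int) (plan, orphans []string) {
	for i := 0; i < count; i++ {
		plan = append(plan, fmt.Sprintf("%s.%d", id, i))
	}
	for _, i := range stateIdx {
		if i >= count {
			orphans = append(orphans, fmt.Sprintf("%s.%d", id, i))
		}
	}
	return plan, orphans
}

func main() {
	plan, orphans := expandCount("aws_instance.web", 2, []int{0, 1, 2, 3})
	fmt.Println(plan)    // [aws_instance.web.0 aws_instance.web.1]
	fmt.Println(orphans) // [aws_instance.web.2 aws_instance.web.3]
}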
- var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadState{ - Name: stateId, - Output: &state, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, - }, - &EvalWriteDiff{ - Name: stateId, - Diff: &diff, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go deleted file mode 100644 index b529569..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,190 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. -type NodePlannableResourceInstance struct { - *NodeAbstractResource -} - -// GraphNodeEvalable -func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Determine the dependencies for the state. - stateDeps := n.StateReferences() - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstance) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadState{ - Name: stateId, - Output: &state, - }, - - // We need to re-interpolate the config here because some - // of the attributes may have become computed during - // earlier planning, due to other resources having - // "requires new resource" diffs. - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 - - // If the configuration is complete and we - // already have a state then we don't need to - // do any further work during apply, because we - // already populated the state during refresh. 
- if !computed && state != nil { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - - &EvalWriteDiff{ - Name: stateId, - Diff: &diff, - }, - }, - } -} - -func (n *NodePlannableResourceInstance) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var diff *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. - &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - &EvalDiff{ - Name: stateId, - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - State: &state, - OutputDiff: &diff, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - &EvalWriteDiff{ - Name: stateId, - Diff: &diff, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go deleted file mode 100644 index 73d6e41..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,54 +0,0 @@ -package terraform - -// NodePlannableResourceOrphan represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodePlannableResourceOrphan struct { - *NodeAbstractResource -} - -func (n *NodePlannableResourceOrphan) Name() string { - return n.NodeAbstractResource.Name() + " (orphan)" -} - -// GraphNodeEvalable -func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
- var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadState{ - Name: stateId, - Output: &state, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - &EvalCheckPreventDestroy{ - Resource: n.Config, - ResourceId: stateId, - Diff: &diff, - }, - &EvalWriteDiff{ - Name: stateId, - Diff: &diff, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go deleted file mode 100644 index cd4fe92..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ /dev/null @@ -1,259 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// NodeRefreshableManagedResource represents a resource that is expanabled into -// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. -type NodeRefreshableManagedResource struct { - *NodeAbstractCountResource -} - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans to make sure these resources are accounted for - // during a scale in. - &OrphanResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableManagedResource", - } - - return b.Build(ctx.Path()) -} - -// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResource -} - -// GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { - return n.Addr -} - -// GraphNodeEvalable -func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { - // Eval info is different depending on what kind of resource this is - switch mode := n.Addr.Mode; mode { - case config.ManagedResourceMode: - if n.ResourceState == nil { - return n.evalTreeManagedResourceNoState() - } - return n.evalTreeManagedResource() - - case config.DataResourceMode: - // Get the data source node. If we don't have a configuration - // then it is an orphan so we destroy it (remove it from the state). 
- var dn GraphNodeEvalable - if n.Config != nil { - dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: n.NodeAbstractResource, - } - } else { - dn = &NodeDestroyableDataResource{ - NodeAbstractResource: n.NodeAbstractResource, - } - } - - return dn.EvalTree() - default: - panic(fmt.Errorf("unsupported resource mode %s", mode)) - } -} - -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var state *InstanceState - - // This happened during initial development. All known cases were - // fixed and tested but as a sanity check let's assert here. - if n.ResourceState == nil { - err := fmt.Errorf( - "No resource state attached for addr: %s\n\n"+ - "This is a bug. Please report this to Terraform with your configuration\n"+ - "and state attached. Please be careful to scrub any sensitive information.", - addr) - return &EvalReturnError{Error: &err} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.ResourceState.Type, - Provider: n.ResourceState.Provider, - Dependencies: n.ResourceState.Dependencies, - State: &state, - }, - }, - } -} - -// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource -// nodes that don't have state attached. An example of where this functionality -// is useful is when a resource that already exists in state is being scaled -// out, ie: has its resource count increased. In this case, the scaled out node -// needs to be available to other nodes (namely data sources) that may depend -// on it for proper interpolation, or confusing "index out of range" errors can -// occur. -// -// The steps in this sequence are very similar to the steps carried out in -// plan, but nothing is done with the diff after it is created - it is dropped, -// and its changes are not counted in the UI. -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var state *InstanceState - var resourceConfig *ResourceConfig - - addr := n.NodeAbstractResource.Addr - stateID := addr.stateId() - info := &InstanceInfo{ - Id: stateID, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Determine the dependencies for the state. - stateDeps := n.StateReferences() - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - // Re-run validation to catch any errors we missed, e.g. 
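The dispatch above picks one of four eval paths during refresh. A condensed sketch of that decision, using booleans in place of the real config and state types:

package main

import "fmt"

// refreshPath mirrors the dispatch in EvalTree above: managed
// resources refresh against state, or run a stub plan when state is
// missing (e.g. during a scale-out, where the diff is computed and
// then dropped, as described above); data resources are either
// re-read or, lacking config, destroyed as orphans.
func refreshPath(managed, hasState, hasConfig bool) string {
	if managed {
		if hasState {
			return "refresh"
		}
		return "stub-plan"
	}
	if hasConfig {
		return "read-data"
	}
	return "destroy-data-orphan"
}

func main() {
	fmt.Println(refreshPath(true, false, true))  // stub-plan
	fmt.Println(refreshPath(false, true, false)) // destroy-data-orphan
}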
type - // mismatches on computed values. - &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, - &EvalReadState{ - Name: stateID, - Output: &state, - }, - &EvalDiff{ - Name: stateID, - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - State: &state, - OutputState: &state, - Stub: true, - }, - &EvalWriteState{ - Name: stateID, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go deleted file mode 100644 index f528f24..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ /dev/null @@ -1,158 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// NodeValidatableResource represents a resource that is used for validation -// only. -type NodeValidatableResource struct { - *NodeAbstractCountResource -} - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - // Ensure we're validating - c := n.NodeAbstractCountResource - c.Validate = true - return c.EvalTree() -} - -// GraphNodeDynamicExpandable -func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count := 1 - if n.Config.RawCount.Value() != unknownValue() { - var err error - count, err = n.Config.Count() - if err != nil { - return nil, err - } - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodeValidatableResourceInstance{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeValidatableResource", - } - - return b.Build(ctx.Path()) -} - -// This represents a _single_ resource instance to validate. -type NodeValidatableResourceInstance struct { - *NodeAbstractResource -} - -// GraphNodeEvalable -func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
- var config *ResourceConfig - var provider ResourceProvider - - seq := &EvalSequence{ - Nodes: []EvalNode{ - &EvalValidateResourceSelfRef{ - Addr: &addr, - Config: &n.Config.RawConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - &EvalValidateResource{ - Provider: &provider, - Config: &config, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - }, - }, - } - - // Validate all the provisioners - for _, p := range n.Config.Provisioners { - var provisioner ResourceProvisioner - var connConfig *ResourceConfig - seq.Nodes = append( - seq.Nodes, - &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - }, - &EvalInterpolate{ - Config: p.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - &EvalInterpolate{ - Config: p.ConnInfo.Copy(), - Resource: resource, - Output: &connConfig, - }, - &EvalValidateProvisioner{ - Provisioner: &provisioner, - Config: &config, - ConnConfig: &connConfig, - }, - ) - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go deleted file mode 100644 index cb61a4e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// NodeRootVariable represents a root variable input. -type NodeRootVariable struct { - Config *config.Variable -} - -func (n *NodeRootVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - return result -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableName() []string { - return []string{n.Name()} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go deleted file mode 100644 index ca99685..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/path.go +++ /dev/null @@ -1,24 +0,0 @@ -package terraform - -import ( - "crypto/md5" - "encoding/hex" -) - -// PathCacheKey returns a cache key for a module path. -// -// TODO: test -func PathCacheKey(path []string) string { - // There is probably a better way to do this, but this is working for now. - // We just create an MD5 hash of all the MD5 hashes of all the path - // elements. This gets us the property that it is unique per ordering. - hash := md5.New() - for _, p := range path { - single := md5.Sum([]byte(p)) - if _, err := hash.Write(single[:]); err != nil { - panic(err) - } - } - - return hex.EncodeToString(hash.Sum(nil)) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go deleted file mode 100644 index 51d6652..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ /dev/null @@ -1,196 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "errors" - "fmt" - "io" - "log" - "sync" - - "github.com/hashicorp/terraform/config/module" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. 
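PathCacheKey above hashes each path element and then hashes the concatenated digests, so the key is unique per ordering. A self-contained replay of that scheme:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// pathKey reproduces the MD5-of-MD5s construction above: hashing the
// per-element digests (rather than the joined strings) avoids
// ambiguity between e.g. ["ab","c"] and ["a","bc"].
func pathKey(path []string) string {
	h := md5.New()
	for _, p := range path {
		single := md5.Sum([]byte(p))
		h.Write(single[:])
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(pathKey([]string{"root", "network"}))
	fmt.Println(pathKey([]string{"network", "root"})) // different key: order matters
}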
-// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. -// This is so that it can run alone without any other data. -type Plan struct { - Diff *Diff - Module *module.Tree - State *State - Vars map[string]interface{} - Targets []string - - TerraformVersion string - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. - Backend *BackendState - - once sync.Once -} - -// Context returns a Context with the data encapsulated in this plan. -// -// The following fields in opts are overridden by the plan: Config, -// Diff, Variables. -// -// If State is not provided, it is set from the plan. If it _is_ provided, -// it must be Equal to the state stored in plan, but may have a newer -// serial. -func (p *Plan) Context(opts *ContextOpts) (*Context, error) { - var err error - opts, err = p.contextOpts(opts) - if err != nil { - return nil, err - } - return NewContext(opts) -} - -// contextOpts mutates the given base ContextOpts in place to use input -// objects obtained from the receiving plan. -func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { - opts := base - - opts.Diff = p.Diff - opts.Module = p.Module - opts.Targets = p.Targets - opts.ProviderSHA256s = p.ProviderSHA256s - - if opts.State == nil { - opts.State = p.State - } else if !opts.State.Equal(p.State) { - // Even if we're overriding the state, it should be logically equal - // to what's in plan. The only valid change to have made by the time - // we get here is to have incremented the serial. - // - // Due to the fact that serialization may change the representation of - // the state, there is little chance that these aren't actually equal. - // Log the error condition for reference, but continue with the state - // we have. - log.Println("[WARNING] Plan state and ContextOpts state are not equal") - } - - thisVersion := VersionString() - if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { - return nil, fmt.Errorf( - "plan was created with a different version of Terraform (created with %s, but running %s)", - p.TerraformVersion, thisVersion, - ) - } - - opts.Variables = make(map[string]interface{}) - for k, v := range p.Vars { - opts.Variables[k] = v - } - - return opts, nil -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -func (p *Plan) init() { - p.once.Do(func() { - if p.Diff == nil { - p.Diff = new(Diff) - p.Diff.init() - } - - if p.State == nil { - p.State = new(State) - p.State.init() - } - - if p.Vars == nil { - p.Vars = make(map[string]interface{}) - } - }) -} - -// The format byte is prefixed into the plan file format so that we have -// the ability in the future to change the file format if we want for any -// reason. -const planFormatMagic = "tfplan" -const planFormatVersion byte = 2 - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. 
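The on-disk framing is small: the literal magic string, one version byte, then a gob stream. A sketch of reading just that header under those assumptions; io.ReadFull is used here in place of the manual read loop below.

package main

import (
	"bytes"
	"fmt"
	"io"
)

const magic = "tfplan"
const version byte = 2

// readHeader consumes and checks the magic bytes and version byte,
// leaving the reader positioned at the gob-encoded payload.
func readHeader(r io.Reader) error {
	buf := make([]byte, len(magic)+1)
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	if string(buf[:len(magic)]) != magic {
		return fmt.Errorf("not a valid plan file")
	}
	if buf[len(magic)] != version {
		return fmt.Errorf("unknown plan file version: %d", buf[len(magic)])
	}
	return nil
}

func main() {
	r := bytes.NewReader(append([]byte(magic), version))
	fmt.Println(readHeader(r)) // <nil>
}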
-func ReadPlan(src io.Reader) (*Plan, error) { - var result *Plan - var err error - n := 0 - - // Verify the magic bytes - magic := make([]byte, len(planFormatMagic)) - for n < len(magic) { - n, err = src.Read(magic[n:]) - if err != nil { - return nil, fmt.Errorf("error while reading magic bytes: %s", err) - } - } - if string(magic) != planFormatMagic { - return nil, fmt.Errorf("not a valid plan file") - } - - // Verify the version is something we can read - var formatByte [1]byte - n, err = src.Read(formatByte[:]) - if err != nil { - return nil, err - } - if n != len(formatByte) { - return nil, errors.New("failed to read plan version byte") - } - - if formatByte[0] != planFormatVersion { - return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0]) - } - - dec := gob.NewDecoder(src) - if err := dec.Decode(&result); err != nil { - return nil, err - } - - return result, nil -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - // Write the magic bytes so we can determine the file format later - n, err := dst.Write([]byte(planFormatMagic)) - if err != nil { - return err - } - if n != len(planFormatMagic) { - return errors.New("failed to write plan format magic bytes") - } - - // Write a version byte so we can iterate on version at some point - n, err = dst.Write([]byte{planFormatVersion}) - if err != nil { - return err - } - if n != 1 { - return errors.New("failed to write plan version byte") - } - - return gob.NewEncoder(dst).Encode(d) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go deleted file mode 100644 index 0acf0be..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ /dev/null @@ -1,360 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" -) - -// ResourceProvisionerConfig is used to pair a provisioner -// with its provided configuration. This allows us to use singleton -// instances of each ResourceProvisioner and to keep the relevant -// configuration instead of instantiating a new Provisioner for each -// resource. -type ResourceProvisionerConfig struct { - Type string - Provisioner ResourceProvisioner - Config *ResourceConfig - RawConfig *config.RawConfig - ConnInfo *config.RawConfig -} - -// Resource encapsulates a resource, its configuration, its provider, -// its current state, and potentially a desired diff from the state it -// wants to reach. -type Resource struct { - // These are all used by the new EvalNode stuff. - Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Provisioners []*ResourceProvisionerConfig - Flags ResourceFlag -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - -// InstanceInfo is used to hold information about the instance and/or -// resource being modified. -type InstanceInfo struct { - // Id is a unique name to represent this instance. This is not related - // to InstanceState.ID in any way. 
- Id string - - // ModulePath is the complete path of the module containing this - // instance. - ModulePath []string - - // Type is the resource type of this instance - Type string - - // uniqueExtra is an internal field that can be populated to supply - // extra metadata that is used to identify a unique instance in - // the graph walk. This will be appended to HumanID when uniqueId - // is called. - uniqueExtra string -} - -// HumanId is a unique Id that is human-friendly and useful for UI elements. -func (i *InstanceInfo) HumanId() string { - if i == nil { - return "" - } - - if len(i.ModulePath) <= 1 { - return i.Id - } - - return fmt.Sprintf( - "module.%s.%s", - strings.Join(i.ModulePath[1:], "."), - i.Id) -} - -func (i *InstanceInfo) uniqueId() string { - prefix := i.HumanId() - if v := i.uniqueExtra; v != "" { - prefix += " " + v - } - - return prefix -} - -// ResourceConfig holds the configuration given for a resource. This is -// done instead of a raw `map[string]interface{}` type so that rich -// methods can be added to it to make dealing with it easier. -type ResourceConfig struct { - ComputedKeys []string - Raw map[string]interface{} - Config map[string]interface{} - - raw *config.RawConfig -} - -// NewResourceConfig creates a new ResourceConfig from a config.RawConfig. -func NewResourceConfig(c *config.RawConfig) *ResourceConfig { - result := &ResourceConfig{raw: c} - result.interpolateForce() - return result -} - -// DeepCopy performs a deep copy of the configuration. This makes it safe -// to modify any of the structures that are part of the resource config without -// affecting the original configuration. -func (c *ResourceConfig) DeepCopy() *ResourceConfig { - // DeepCopying a nil should return a nil to avoid panics - if c == nil { - return nil - } - - // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) - if err != nil { - panic(err) - } - - // Force the type - result := copy.(*ResourceConfig) - - // For the raw configuration, we can just use its own copy method - result.raw = c.raw.Copy() - - return result -} - -// Equal checks the equality of two resource configs. -func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { - // If either are nil, then they're only equal if they're both nil - if c == nil || c2 == nil { - return c == c2 - } - - // Sort the computed keys so they're deterministic - sort.Strings(c.ComputedKeys) - sort.Strings(c2.ComputedKeys) - - // Two resource configs if their exported properties are equal. - // We don't compare "raw" because it is never used again after - // initialization and for all intents and purposes they are equal - // if the exported properties are equal. - check := [][2]interface{}{ - {c.ComputedKeys, c2.ComputedKeys}, - {c.Raw, c2.Raw}, - {c.Config, c2.Config}, - } - for _, pair := range check { - if !reflect.DeepEqual(pair[0], pair[1]) { - return false - } - } - - return true -} - -// CheckSet checks that the given list of configuration keys is -// properly set. If not, errors are returned for each unset key. -// -// This is useful to be called in the Validate method of a ResourceProvider. -func (c *ResourceConfig) CheckSet(keys []string) []error { - var errs []error - - for _, k := range keys { - if !c.IsSet(k) { - errs = append(errs, fmt.Errorf("%s must be set", k)) - } - } - - return errs -} - -// Get looks up a configuration value by key and returns the value. -// -// The second return value is true if the get was successful. 
Get will -// return the raw value if the key is computed, so you should pair this -// with IsComputed. -func (c *ResourceConfig) Get(k string) (interface{}, bool) { - // We aim to get a value from the configuration. If it is computed, - // then we return the pure raw value. - source := c.Config - if c.IsComputed(k) { - source = c.Raw - } - - return c.get(k, source) -} - -// GetRaw looks up a configuration value by key and returns the value, -// from the raw, uninterpolated config. -// -// The second return value is true if the get was successful. Get will -// not succeed if the value is being computed. -func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { - return c.get(k, c.Raw) -} - -// IsComputed returns whether the given key is computed or not. -func (c *ResourceConfig) IsComputed(k string) bool { - // The next thing we do is check the config if we get a computed - // value out of it. - v, ok := c.get(k, c.Config) - if !ok { - return false - } - - // If value is nil, then it isn't computed - if v == nil { - return false - } - - // Test if the value contains an unknown value - var w unknownCheckWalker - if err := reflectwalk.Walk(v, &w); err != nil { - panic(err) - } - - return w.Unknown -} - -// IsSet checks if the key in the configuration is set. A key is set if -// it has a value or the value is being computed (is unknown currently). -// -// This function should be used rather than checking the keys of the -// raw configuration itself, since a key may be omitted from the raw -// configuration if it is being computed. -func (c *ResourceConfig) IsSet(k string) bool { - if c == nil { - return false - } - - if c.IsComputed(k) { - return true - } - - if _, ok := c.Get(k); ok { - return true - } - - return false -} - -func (c *ResourceConfig) get( - k string, raw map[string]interface{}) (interface{}, bool) { - parts := strings.Split(k, ".") - if len(parts) == 1 && parts[0] == "" { - parts = nil - } - - var current interface{} = raw - var previous interface{} = nil - for i, part := range parts { - if current == nil { - return nil, false - } - - cv := reflect.ValueOf(current) - switch cv.Kind() { - case reflect.Map: - previous = current - v := cv.MapIndex(reflect.ValueOf(part)) - if !v.IsValid() { - if i > 0 && i != (len(parts)-1) { - tryKey := strings.Join(parts[i:], ".") - v := cv.MapIndex(reflect.ValueOf(tryKey)) - if !v.IsValid() { - return nil, false - } - - return v.Interface(), true - } - - return nil, false - } - - current = v.Interface() - case reflect.Slice: - previous = current - - if part == "#" { - // If any value in a list is computed, this whole thing - // is computed and we can't read any part of it. - for i := 0; i < cv.Len(); i++ { - if v := cv.Index(i).Interface(); v == unknownValue() { - return v, true - } - } - - current = cv.Len() - } else { - i, err := strconv.ParseInt(part, 0, 0) - if err != nil { - return nil, false - } - if i >= int64(cv.Len()) { - return nil, false - } - current = cv.Index(int(i)).Interface() - } - case reflect.String: - // This happens when map keys contain "." and have a common - // prefix so were split as path components above. - actualKey := strings.Join(parts[i-1:], ".") - if prevMap, ok := previous.(map[string]interface{}); ok { - v, ok := prevMap[actualKey] - return v, ok - } - - return nil, false - default: - panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) - } - } - - return current, true -} - -// interpolateForce is a temporary thing. 
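The get helper above splits on "." but still copes with map keys that themselves contain dots by retrying the joined remainder as one literal key. A reduced sketch of that fallback for nested maps only, leaving out the slice and "#" handling:

package main

import (
	"fmt"
	"strings"
)

// lookup walks nested maps by dotted path; when a component is
// missing it retries the rest of the path as a single key, the same
// trick used above for keys containing ".".
func lookup(raw map[string]interface{}, key string) (interface{}, bool) {
	parts := strings.Split(key, ".")
	var cur interface{} = raw
	for i, part := range parts {
		m, ok := cur.(map[string]interface{})
		if !ok {
			return nil, false
		}
		v, ok := m[part]
		if !ok {
			// Retry the remainder as one literal key, e.g. "a.b".
			v, ok = m[strings.Join(parts[i:], ".")]
			if !ok {
				return nil, false
			}
			return v, true
		}
		cur = v
	}
	return cur, true
}

func main() {
	cfg := map[string]interface{}{
		"tags": map[string]interface{}{"a.b": "x"},
	}
	fmt.Println(lookup(cfg, "tags.a.b")) // x true
}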
We want to get rid of interpolate -// above and likewise this, but it can only be done after the f-ast-graph -// refactor is complete. -func (c *ResourceConfig) interpolateForce() { - if c.raw == nil { - var err error - c.raw, err = config.NewRawConfig(make(map[string]interface{})) - if err != nil { - panic(err) - } - } - - c.ComputedKeys = c.raw.UnknownKeys() - c.Raw = c.raw.RawMap() - c.Config = c.raw.Config() -} - -// unknownCheckWalker -type unknownCheckWalker struct { - Unknown bool -} - -func (w *unknownCheckWalker) Primitive(v reflect.Value) error { - if v.Interface() == unknownValue() { - w.Unknown = true - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go deleted file mode 100644 index 8badca8..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ /dev/null @@ -1,471 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// ResourceAddress is a way of identifying an individual resource (or, -// eventually, a subset of resources) within the state. It is used for Targets. -type ResourceAddress struct { - // Addresses a resource falling somewhere in the module path - // When specified alone, addresses all resources within a module path - Path []string - - // Addresses a specific resource that occurs in a list - Index int - - InstanceType InstanceType - InstanceTypeSet bool - Name string - Type string - Mode config.ResourceMode // significant only if InstanceTypeSet -} - -// Copy returns a copy of this ResourceAddress -func (r *ResourceAddress) Copy() *ResourceAddress { - if r == nil { - return nil - } - - n := &ResourceAddress{ - Path: make([]string, 0, len(r.Path)), - Index: r.Index, - InstanceType: r.InstanceType, - Name: r.Name, - Type: r.Type, - Mode: r.Mode, - } - for _, p := range r.Path { - n.Path = append(n.Path, p) - } - return n -} - -// String outputs the address that parses into this address. -func (r *ResourceAddress) String() string { - var result []string - for _, p := range r.Path { - result = append(result, "module", p) - } - - switch r.Mode { - case config.ManagedResourceMode: - // nothing to do - case config.DataResourceMode: - result = append(result, "data") - default: - panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) - } - - if r.Type != "" { - result = append(result, r.Type) - } - - if r.Name != "" { - name := r.Name - if r.InstanceTypeSet { - switch r.InstanceType { - case TypePrimary: - name += ".primary" - case TypeDeposed: - name += ".deposed" - case TypeTainted: - name += ".tainted" - } - } - - if r.Index >= 0 { - name += fmt.Sprintf("[%d]", r.Index) - } - result = append(result, name) - } - - return strings.Join(result, ".") -} - -// HasResourceSpec returns true if the address has a resource spec, as -// defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html -// In particular, this returns false if the address contains only -// a module path, thus addressing the entire module. -func (r *ResourceAddress) HasResourceSpec() bool { - return r.Type != "" && r.Name != "" -} - -// WholeModuleAddress returns the resource address that refers to all -// resources in the same module as the receiver address. 
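The String method above composes a canonical address from module path, resource mode, type, and name. A sketch of that composition for the common cases, omitting the instance-type suffixes; the function and parameters are illustrative:

package main

import (
	"fmt"
	"strings"
)

// addrString mimics String() above: "module.<p>" pairs for the path,
// an optional "data" marker, then type and name with an optional
// bracketed index.
func addrString(path []string, dataMode bool, typ, name string, index int) string {
	var parts []string
	for _, p := range path {
		parts = append(parts, "module", p)
	}
	if dataMode {
		parts = append(parts, "data")
	}
	if typ != "" {
		parts = append(parts, typ)
	}
	if name != "" {
		if index >= 0 {
			name += fmt.Sprintf("[%d]", index)
		}
		parts = append(parts, name)
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(addrString([]string{"net"}, true, "aws_ami", "web", 2))
	// Output: module.net.data.aws_ami.web[2]
}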
-func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { - return &ResourceAddress{ - Path: r.Path, - Index: -1, - InstanceTypeSet: false, - } -} - -// MatchesConfig returns true if the receiver matches the given -// configuration resource within the given configuration module. -// -// Since resource configuration blocks represent all of the instances of -// a multi-instance resource, the index of the address (if any) is not -// considered. -func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool { - if r.HasResourceSpec() { - if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name { - return false - } - } - - addrPath := r.Path - cfgPath := mod.Path() - - // normalize - if len(addrPath) == 0 { - addrPath = nil - } - if len(cfgPath) == 0 { - cfgPath = nil - } - return reflect.DeepEqual(addrPath, cfgPath) -} - -// stateId returns the ID that this resource should be entered with -// in the state. This is also used for diffs. In the future, we'd like to -// move away from this string field so I don't export this. -func (r *ResourceAddress) stateId() string { - result := fmt.Sprintf("%s.%s", r.Type, r.Name) - switch r.Mode { - case config.ManagedResourceMode: - // Done - case config.DataResourceMode: - result = fmt.Sprintf("data.%s", result) - default: - panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) - } - if r.Index >= 0 { - result += fmt.Sprintf(".%d", r.Index) - } - - return result -} - -// parseResourceAddressConfig creates a resource address from a config.Resource -func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) { - return &ResourceAddress{ - Type: r.Type, - Name: r.Name, - Index: -1, - InstanceType: TypePrimary, - Mode: r.Mode, - }, nil -} - -// parseResourceAddressInternal parses the somewhat bespoke resource -// identifier used in states and diffs, such as "instance.name.0". -func parseResourceAddressInternal(s string) (*ResourceAddress, error) { - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - mode := config.ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - mode = config.DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && mode != config.DataResourceMode { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - addr := &ResourceAddress{ - Type: parts[0], - Name: parts[1], - Index: -1, - InstanceType: TypePrimary, - Mode: mode, - } - - // If we have more parts, then we have an index. Parse that. 
- if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) - } - - addr.Index = int(idx) - } - - return addr, nil -} - -func ParseResourceAddress(s string) (*ResourceAddress, error) { - matches, err := tokenizeResourceAddress(s) - if err != nil { - return nil, err - } - mode := config.ManagedResourceMode - if matches["data_prefix"] != "" { - mode = config.DataResourceMode - } - resourceIndex, err := ParseResourceIndex(matches["index"]) - if err != nil { - return nil, err - } - instanceType, err := ParseInstanceType(matches["instance_type"]) - if err != nil { - return nil, err - } - path := ParseResourcePath(matches["path"]) - - // not allowed to say "data." without a type following - if mode == config.DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf( - "invalid resource address %q: must target specific data instance", - s, - ) - } - - return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - InstanceTypeSet: matches["instance_type"] != "", - Name: matches["name"], - Type: matches["type"], - Mode: mode, - }, nil -} - -// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a -// resource name as described in a module diff. -// -// For historical reasons a different addressing format is used in this -// context. The internal format should not be shown in the UI and instead -// this function should be used to translate to a ResourceAddress and -// then, where appropriate, use the String method to produce a canonical -// resource address string for display in the UI. -// -// The given path slice must be empty (or nil) for the root module, and -// otherwise consist of a sequence of module names traversing down into -// the module tree. If a non-nil path is provided, the caller must not -// modify its underlying array after passing it to this function. -func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { - addr, err := parseResourceAddressInternal(key) - if err != nil { - return nil, err - } - addr.Path = path - return addr, nil -} - -// Contains returns true if and only if the given node is contained within -// the receiver. -// -// Containment is defined in terms of the module and resource hierarchy: -// a resource is contained within its module and any ancestor modules, -// an indexed resource instance is contained within the unindexed resource, etc. -func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { - ourPath := addr.Path - givenPath := other.Path - if len(givenPath) < len(ourPath) { - return false - } - for i := range ourPath { - if ourPath[i] != givenPath[i] { - return false - } - } - - // If the receiver is a whole-module address then the path prefix - // matching is all we need. - if !addr.HasResourceSpec() { - return true - } - - if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { - return false - } - - if addr.Index != -1 && addr.Index != other.Index { - return false - } - - if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { - return false - } - - return true -} - -// Equals returns true if the receiver matches the given address. -// -// The name of this method is a misnomer, since it doesn't test for exact -// equality. Instead, it tests that the _specified_ parts of each -// address match, treating any unspecified parts as wildcards.
-// -// See also Contains, which takes a more hierarchical approach to comparing -// addresses. -func (addr *ResourceAddress) Equals(raw interface{}) bool { - other, ok := raw.(*ResourceAddress) - if !ok { - return false - } - - pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || - reflect.DeepEqual(addr.Path, other.Path) - - indexMatch := addr.Index == -1 || - other.Index == -1 || - addr.Index == other.Index - - nameMatch := addr.Name == "" || - other.Name == "" || - addr.Name == other.Name - - typeMatch := addr.Type == "" || - other.Type == "" || - addr.Type == other.Type - - // mode is significant only when type is set - modeMatch := addr.Type == "" || - other.Type == "" || - addr.Mode == other.Mode - - return pathMatch && - indexMatch && - addr.InstanceType == other.InstanceType && - nameMatch && - typeMatch && - modeMatch -} - -// Less returns true if and only if the receiver should be sorted before -// the given address when presenting a list of resource addresses to -// an end-user. -// -// This sort uses lexicographic sorting for most components, but uses -// numeric sort for indices, thus causing index 10 to sort after -// index 9, rather than after index 1. -func (addr *ResourceAddress) Less(other *ResourceAddress) bool { - - switch { - - case len(addr.Path) < len(other.Path): - return true - - case !reflect.DeepEqual(addr.Path, other.Path): - // If the two paths are the same length but don't match, we'll just - // cheat and compare the string forms since it's easier than - // comparing all of the path segments in turn. - addrStr := addr.String() - otherStr := other.String() - return addrStr < otherStr - - case addr.Mode == config.DataResourceMode && other.Mode != config.DataResourceMode: - return true - - case addr.Type < other.Type: - return true - - case addr.Name < other.Name: - return true - - case addr.Index < other.Index: - // Since "Index" is -1 for an un-indexed address, this also conveniently - // sorts unindexed addresses before indexed ones, should they both - // appear for some reason. - return true - - case other.InstanceTypeSet && !addr.InstanceTypeSet: - return true - - case addr.InstanceType < other.InstanceType: - // InstanceType is actually an enum, so this is just an arbitrary - // sort based on the enum numeric values, and thus not particularly - // meaningful.
- return true - - default: - return false - - } -} - -func ParseResourceIndex(s string) (int, error) { - if s == "" { - return -1, nil - } - return strconv.Atoi(s) -} - -func ParseResourcePath(s string) []string { - if s == "" { - return nil - } - parts := strings.Split(s, ".") - path := make([]string, 0, len(parts)) - for _, s := range parts { - // Due to the limitations of the regexp match below, the path match has - // some noise in it we have to filter out :| - if s == "" || s == "module" { - continue - } - path = append(path, s) - } - return path -} - -func ParseInstanceType(s string) (InstanceType, error) { - switch s { - case "", "primary": - return TypePrimary, nil - case "deposed": - return TypeDeposed, nil - case "tainted": - return TypeTainted, nil - default: - return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) - } -} - -func tokenizeResourceAddress(s string) (map[string]string, error) { - // Example of portions of the regexp below using the - // string "aws_instance.web.tainted[1]" - re := regexp.MustCompile(`\A` + - // "module.foo.module.bar" (optional) - `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` + - // possibly "data.", if targeting is a data resource - `(?P<data_prefix>(?:data\.)?)` + - // "aws_instance.web" (optional when module path specified) - `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` + - // "tainted" (optional, omission implies: "primary") - `(?:\.(?P<instance_type>\w+))?` + - // "1" (optional, omission implies: "0") - `(?:\[(?P<index>\d+)\])?` + - `\z`) - - groupNames := re.SubexpNames() - rawMatches := re.FindAllStringSubmatch(s, -1) - if len(rawMatches) != 1 { - return nil, fmt.Errorf("invalid resource address %q", s) - } - - matches := make(map[string]string) - for i, m := range rawMatches[0] { - matches[groupNames[i]] = m - } - - return matches, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go deleted file mode 100644 index 7d78f67..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go +++ /dev/null @@ -1,285 +0,0 @@ -package terraform - -import ( - "fmt" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/plugin/discovery" -) - -// ResourceProvider is an interface that must be implemented by any -// resource provider: the thing that creates and manages the resources in -// a Terraform configuration. - -// -// Important implementation note: All returned pointers, such as -// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to -// shared data. Terraform is highly parallel and assumes that this data is safe -// to read/write in parallel so they must be unique references. Note that it is -// safe to return arguments as results, however. -type ResourceProvider interface { - /********************************************************************* - * Functions related to the provider - *********************************************************************/ - - // Input is called to ask the provider to ask the user for input - // for completing the configuration if necessary. - // - // This may or may not be called, so resource provider writers shouldn't - // rely on this being available to set some default values for validate - // later. Example of a situation where this wouldn't be called is if - // the user is not using a TTY.
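// [Editor's aside: a runnable sketch, not part of the original file.]
// tokenizeResourceAddress above is what feeds ParseResourceAddress. Extracted
// into a standalone program, the same named-group regexp splits an address as
// follows (the input string is an arbitrary example):
//
//     package main
//
//     import (
//         "fmt"
//         "regexp"
//     )
//
//     func main() {
//         re := regexp.MustCompile(`\A(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
//             `(?P<data_prefix>(?:data\.)?)` +
//             `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
//             `(?:\.(?P<instance_type>\w+))?` +
//             `(?:\[(?P<index>\d+)\])?\z`)
//         m := re.FindStringSubmatch("module.foo.aws_instance.web.tainted[1]")
//         for i, name := range re.SubexpNames() {
//             if name != "" && m[i] != "" {
//                 fmt.Printf("%s=%q\n", name, m[i])
//             }
//         }
//         // path="module.foo." module_name="foo" type="aws_instance"
//         // name="web" instance_type="tainted" index="1"
//     }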
- Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. - Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. 
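// [Editor's aside: an illustrative sketch, not part of the original file.]
// The contract of the resource methods above, in call order: Diff proposes a
// change from the current state toward the desired configuration, Apply
// enacts that diff, and Refresh re-reads reality. Assuming a configured
// provider p; info, state and cfg are placeholders:
//
//     diff, err := p.Diff(info, state, cfg)       // plan: state + config -> diff
//     if err == nil && diff != nil {
//         state, err = p.Apply(info, state, diff) // apply: enact the diff
//     }
//     if err == nil {
//         state, err = p.Refresh(info, state)     // refresh: sync with reality
//     }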
- Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. - // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. - ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderError may be returned when creating a Context if the -// required providers cannot be satisfied. This error can then be used to -// format a more useful message for the user. -type ResourceProviderError struct { - Errors []error -} - -func (e *ResourceProviderError) Error() string { - // use multierror to format the default output - return multierror.Append(nil, e.Errors...).Error() -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. -type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string -} - -// ResourceProviderResolver is an interface implemented by objects that are -// able to resolve a given set of resource provider version constraints -// into ResourceProviderFactory callbacks. 
-type ResourceProviderResolver interface { - // Given a constraint map, return a ResourceProviderFactory for each - // requested provider. If some or all of the constraints cannot be - // satisfied, return a non-nil slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) -} - -// ResourceProviderResolverFunc wraps a callback function and turns it into -// a ResourceProviderResolver implementation, for convenience in situations -// where a function and its associated closure are sufficient as a resolver -// implementation. -type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) - -// ResolveProviders implements ResourceProviderResolver by calling the -// wrapped function. -func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - return f(reqd) -} - -// ResourceProviderResolverFixed returns a ResourceProviderResolver that -// has a fixed set of provider factories provided by the caller. The returned -// resolver ignores version constraints entirely and just returns the given -// factory for each requested provider name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { - return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - ret := make(map[string]ResourceProviderFactory, len(reqd)) - var errs []error - for name := range reqd { - if factory, exists := factories[name]; exists { - ret[name] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. -func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - -// resourceProviderFactories matches available plugins to the given version -// requirements to produce a map of compatible provider plugins if possible, -// or an error if the currently-available plugins are insufficient. -// -// This should be called only with configurations that have passed calls -// to config.Validate(), which ensures that all of the given version -// constraints are valid. It will panic if any invalid constraints are present. 
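// [Editor's aside: an illustrative sketch, not part of the original file.]
// Wiring the resolver pieces above together: a fixed set of in-process
// factories can satisfy provider requirements without plugin discovery,
// which is how tests typically use this API. reqd is a placeholder
// discovery.PluginRequirements value:
//
//     factories := map[string]ResourceProviderFactory{
//         "test": ResourceProviderFactoryFixed(new(MockResourceProvider)),
//     }
//     resolver := ResourceProviderResolverFixed(factories)
//     provs, errs := resolver.ResolveProviders(reqd)
//     // errs lists any provider named in reqd but absent from factories.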
-func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) { - ret, errs := resolver.ResolveProviders(reqd) - if errs != nil { - return nil, &ResourceProviderError{ - Errors: errs, - } - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go deleted file mode 100644 index f531533..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ /dev/null @@ -1,297 +0,0 @@ -package terraform - -import "sync" - -// MockResourceProvider implements ResourceProvider but mocks out all the -// calls for testing purposes. -type MockResourceProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - CloseCalled bool - CloseError error - InputCalled bool - InputInput UIInput - InputConfig *ResourceConfig - InputReturnConfig *ResourceConfig - InputReturnError error - InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) - ApplyCalled bool - ApplyInfo *InstanceInfo - ApplyState *InstanceState - ApplyDiff *InstanceDiff - ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) - ApplyReturn *InstanceState - ApplyReturnError error - ConfigureCalled bool - ConfigureConfig *ResourceConfig - ConfigureFn func(*ResourceConfig) error - ConfigureReturnError error - DiffCalled bool - DiffInfo *InstanceInfo - DiffState *InstanceState - DiffDesired *ResourceConfig - DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) - DiffReturn *InstanceDiff - DiffReturnError error - RefreshCalled bool - RefreshInfo *InstanceInfo - RefreshState *InstanceState - RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) - RefreshReturn *InstanceState - RefreshReturnError error - ResourcesCalled bool - ResourcesReturn []ResourceType - ReadDataApplyCalled bool - ReadDataApplyInfo *InstanceInfo - ReadDataApplyDiff *InstanceDiff - ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) - ReadDataApplyReturn *InstanceState - ReadDataApplyReturnError error - ReadDataDiffCalled bool - ReadDataDiffInfo *InstanceInfo - ReadDataDiffDesired *ResourceConfig - ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - ReadDataDiffReturn *InstanceDiff - ReadDataDiffReturnError error - StopCalled bool - StopFn func() error - StopReturnError error - DataSourcesCalled bool - DataSourcesReturn []DataSource - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(*ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateResourceCalled bool - ValidateResourceType string - ValidateResourceConfig *ResourceConfig - ValidateResourceReturnWarns []string - ValidateResourceReturnErrors []error - ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateDataSourceCalled bool - ValidateDataSourceType string - ValidateDataSourceConfig *ResourceConfig - ValidateDataSourceReturnWarns []string - ValidateDataSourceReturnErrors []error - - ImportStateCalled bool - ImportStateInfo *InstanceInfo - ImportStateID string - ImportStateReturn []*InstanceState - ImportStateReturnError error - ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) -} - -func (p 
*MockResourceProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} - -func (p *MockResourceProvider) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - p.InputCalled = true - p.InputInput = input - p.InputConfig = c - if p.InputFn != nil { - return p.InputFn(input, c) - } - return p.InputReturnConfig, p.InputReturnError -} - -func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceCalled = true - p.ValidateResourceType = t - p.ValidateResourceConfig = c - - if p.ValidateResourceFn != nil { - return p.ValidateResourceFn(t, c) - } - - return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors -} - -func (p *MockResourceProvider) Configure(c *ResourceConfig) error { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureConfig = c - - if p.ConfigureFn != nil { - return p.ConfigureFn(c) - } - - return p.ConfigureReturnError -} - -func (p *MockResourceProvider) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} - -func (p *MockResourceProvider) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // We only lock while writing data. Reading is fine - p.Lock() - p.ApplyCalled = true - p.ApplyInfo = info - p.ApplyState = state - p.ApplyDiff = diff - p.Unlock() - - if p.ApplyFn != nil { - return p.ApplyFn(info, state, diff) - } - - return p.ApplyReturn.DeepCopy(), p.ApplyReturnError -} - -func (p *MockResourceProvider) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.DiffCalled = true - p.DiffInfo = info - p.DiffState = state - p.DiffDesired = desired - if p.DiffFn != nil { - return p.DiffFn(info, state, desired) - } - - return p.DiffReturn.DeepCopy(), p.DiffReturnError -} - -func (p *MockResourceProvider) Refresh( - info *InstanceInfo, - s *InstanceState) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.RefreshCalled = true - p.RefreshInfo = info - p.RefreshState = s - - if p.RefreshFn != nil { - return p.RefreshFn(info, s) - } - - return p.RefreshReturn.DeepCopy(), p.RefreshReturnError -} - -func (p *MockResourceProvider) Resources() []ResourceType { - p.Lock() - defer p.Unlock() - - p.ResourcesCalled = true - return p.ResourcesReturn -} - -func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ImportStateCalled = true - p.ImportStateInfo = info - p.ImportStateID = id - if p.ImportStateFn != nil { - return p.ImportStateFn(info, id) - } - - var result []*InstanceState - if p.ImportStateReturn != nil { - result = make([]*InstanceState, len(p.ImportStateReturn)) - for i, v := range p.ImportStateReturn { - result[i] = v.DeepCopy() - } - } - - return result, p.ImportStateReturnError -} - -func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceCalled = true - p.ValidateDataSourceType = t - p.ValidateDataSourceConfig = c - - if 
p.ValidateDataSourceFn != nil { - return p.ValidateDataSourceFn(t, c) - } - - return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors -} - -func (p *MockResourceProvider) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataDiffCalled = true - p.ReadDataDiffInfo = info - p.ReadDataDiffDesired = desired - if p.ReadDataDiffFn != nil { - return p.ReadDataDiffFn(info, desired) - } - - return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError -} - -func (p *MockResourceProvider) ReadDataApply( - info *InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataApplyCalled = true - p.ReadDataApplyInfo = info - p.ReadDataApplyDiff = d - - if p.ReadDataApplyFn != nil { - return p.ReadDataApplyFn(info, d) - } - - return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError -} - -func (p *MockResourceProvider) DataSources() []DataSource { - p.Lock() - defer p.Unlock() - - p.DataSourcesCalled = true - return p.DataSourcesReturn -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go deleted file mode 100644 index 361ec1e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go +++ /dev/null @@ -1,54 +0,0 @@ -package terraform - -// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns the new - // resource state along with an error. Instead of a diff, the ResourceConfig - // is provided since provisioners only run after a resource has been - // newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. 
- Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go deleted file mode 100644 index f471a51..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go +++ /dev/null @@ -1,72 +0,0 @@ -package terraform - -import "sync" - -// MockResourceProvisioner implements ResourceProvisioner but mocks out all the -// calls for testing purposes. -type MockResourceProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - ApplyCalled bool - ApplyOutput UIOutput - ApplyState *InstanceState - ApplyConfig *ResourceConfig - ApplyFn func(*InstanceState, *ResourceConfig) error - ApplyReturnError error - - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(c *ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - - StopCalled bool - StopFn func() error - StopReturnError error -} - -func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvisioner) Apply( - output UIOutput, - state *InstanceState, - c *ResourceConfig) error { - p.Lock() - - p.ApplyCalled = true - p.ApplyOutput = output - p.ApplyState = state - p.ApplyConfig = c - if p.ApplyFn != nil { - fn := p.ApplyFn - p.Unlock() - return fn(state, c) - } - - defer p.Unlock() - return p.ApplyReturnError -} - -func (p *MockResourceProvisioner) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go deleted file mode 100644 index 20f1d8a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/semantics.go +++ /dev/null @@ -1,132 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// GraphSemanticChecker is the interface that semantic checks across -// the entire Terraform graph implement. -// -// The graph should NOT be modified by the semantic checker. -type GraphSemanticChecker interface { - Check(*dag.Graph) error -} - -// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker -// that runs a list of SemanticCheckers against the vertices of the graph -// in no specified order. 
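// [Editor's aside: an illustrative sketch, not part of the original file.]
// The mock provisioner above follows the package's record-and-replay
// convention: each call flips a *Called flag, captures its arguments, and
// returns canned values. A test might use it like this, with cfg a
// placeholder config:
//
//     p := new(MockResourceProvisioner)
//     p.ValidateReturnWarns = []string{"deprecated option"}
//     warns, errs := p.Validate(cfg)
//     // p.ValidateCalled == true, p.ValidateConfig == cfg,
//     // warns[0] == "deprecated option", errs == nil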
-type UnorderedSemanticCheckRunner struct { - Checks []SemanticChecker -} - -func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error { - var err error - for _, v := range g.Vertices() { - for _, check := range sc.Checks { - if e := check.Check(g, v); e != nil { - err = multierror.Append(err, e) - } - } - } - - return err -} - -// SemanticChecker is the interface that semantic checks across the -// Terraform graph implement. Errors are accumulated. Even after an error -// is returned, child vertices in the graph will still be visited. -// -// The graph should NOT be modified by the semantic checker. -// -// The order in which vertices are visited is left unspecified, so the -// semantic checks should not rely on that. -type SemanticChecker interface { - Check(*dag.Graph, dag.Vertex) error -} - -// smcUserVariables does all the semantic checks to verify that the -// variables given satisfy the configuration itself. -func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { - var errs []error - - cvs := make(map[string]*config.Variable) - for _, v := range c.Variables { - cvs[v.Name] = v - } - - // Check that all required variables are present - required := make(map[string]struct{}) - for _, v := range c.Variables { - if v.Required() { - required[v.Name] = struct{}{} - } - } - for k, _ := range vs { - delete(required, k) - } - if len(required) > 0 { - for k, _ := range required { - errs = append(errs, fmt.Errorf( - "Required variable not set: %s", k)) - } - } - - // Check that types match up - for name, proposedValue := range vs { - // Check for "map.key" fields. These stopped working with Terraform - // 0.7 but we do this to surface a better error message informing - // the user what happened. - if idx := strings.Index(name, "."); idx > 0 { - key := name[:idx] - if _, ok := cvs[key]; ok { - errs = append(errs, fmt.Errorf( - "%s: Overriding map keys with the format `name.key` is no "+ - "longer allowed. You may still override keys by setting "+ - "`name = { key = value }`. The maps will be merged. This "+ - "behavior appeared in 0.7.0.", name)) - continue - } - } - - schema, ok := cvs[name] - if !ok { - continue - } - - declaredType := schema.Type() - - switch declaredType { - case config.VariableTypeString: - switch proposedValue.(type) { - case string: - continue - } - case config.VariableTypeMap: - switch v := proposedValue.(type) { - case map[string]interface{}: - continue - case []map[string]interface{}: - // if we have a list of 1 map, it will get coerced later as needed - if len(v) == 1 { - continue - } - } - case config.VariableTypeList: - switch proposedValue.(type) { - case []interface{}: - continue - } - } - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) - } - - // TODO(mitchellh): variables that are unknown - - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go deleted file mode 100644 index 4632559..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -// Shadow is the interface that any "shadow" structures must implement. -// -// A shadow structure is an interface implementation (typically) that -// shadows a real implementation and verifies that the same behavior occurs -// on both. The semantics of this behavior are up to the interface itself. -// -// A shadow NEVER modifies real values or state. 
It must always be safe to use. -// -// For example, a ResourceProvider shadow ensures that the same operations -// are done on the same resources with the same configurations. -// -// The typical usage of a shadow following this interface is to complete -// the real operations, then call CloseShadow which tells the shadow that -// the real side is done. Then, once the shadow is also complete, call -// ShadowError to find any errors that may have been caught. -type Shadow interface { - // CloseShadow tells the shadow that the REAL implementation is - // complete. Therefore, any calls that would block should now return - // immediately since no more changes will happen to the real side. - CloseShadow() error - - // ShadowError returns the errors that the shadow has found. - // This should be called AFTER CloseShadow and AFTER the shadow is - // known to be complete (no more calls to it). - ShadowError() error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go deleted file mode 100644 index 116cf84..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go +++ /dev/null @@ -1,273 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// newShadowComponentFactory creates a shadowed contextComponentFactory -// so that requests to create new components result in both a real and -// shadow side. -func newShadowComponentFactory( - f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) { - // Create the shared data - shared := &shadowComponentFactoryShared{contextComponentFactory: f} - - // Create the real side - real := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - } - - // Create the shadow - shadow := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - Shadow: true, - } - - return real, shadow -} - -// shadowComponentFactory is the shadow side. Any components created -// with this factory are fake and will not cause real work to happen. -// -// Unlike other shadowers, the shadow component factory will allow the -// shadow to create _any_ component even if it is never requested on the -// real side. This is because errors will happen later downstream as function -// calls are made to the shadows that are never matched on the real side. -type shadowComponentFactory struct { - *shadowComponentFactoryShared - - Shadow bool // True if this should return the shadow - lock sync.Mutex -} - -func (f *shadowComponentFactory) ResourceProvider( - n, uid string) (ResourceProvider, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid) - var result ResourceProvider = real - if f.Shadow { - result = shadow - } - - return result, err -} - -func (f *shadowComponentFactory) ResourceProvisioner( - n, uid string) (ResourceProvisioner, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid) - var result ResourceProvisioner = real - if f.Shadow { - result = shadow - } - - return result, err -} - -// CloseShadow is called when the _real_ side is complete. This will cause -// all future blocking operations to return immediately on the shadow to -// ensure the shadow also completes. 
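// [Editor's aside: an illustrative sketch, not part of the original file.]
// The intended lifecycle of any Shadow, per the interface above: finish the
// real work, signal completion, let the shadow drain, then collect its
// verdict. s is a placeholder Shadow value:
//
//     // ... the real side has finished all of its operations ...
//     if err := s.CloseShadow(); err != nil {
//         log.Printf("[WARN] closing shadow: %s", err)
//     }
//     // ... once the shadow side has also finished ...
//     if err := s.ShadowError(); err != nil {
//         log.Printf("[WARN] shadow found divergence: %s", err)
//     }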
-func (f *shadowComponentFactory) CloseShadow() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're already closed, it's an error - if shared.closed { - return fmt.Errorf("component factory shadow already closed") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - // Mark ourselves as closed - shared.closed = true - - return result -} - -func (f *shadowComponentFactory) ShadowError() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're not closed, it's an error - if !shared.closed { - return fmt.Errorf("component factory must be closed to retrieve errors") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - return result -} - -// shadowComponentFactoryShared is shared data between the two factories. -// -// It is NOT SAFE to run any function on this struct in parallel. Lock -// access to this struct. -type shadowComponentFactoryShared struct { - contextComponentFactory - - closed bool - providers shadow.KeyedValue - providerKeys []string - provisioners shadow.KeyedValue - provisionerKeys []string -} - -// shadowComponentFactoryProviderEntry is the entry that is stored in -// the Shadows key/value for a provider. -type shadowComponentFactoryProviderEntry struct { - Real ResourceProvider - Shadow shadowResourceProvider - Err error -} - -type shadowComponentFactoryProvisionerEntry struct { - Real ResourceProvisioner - Shadow shadowResourceProvisioner - Err error -} - -func (f *shadowComponentFactoryShared) ResourceProvider( - n, uid string) (ResourceProvider, shadowResourceProvider, error) { - // Determine if we already have a value - raw, ok := f.providers.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProviderEntry - - // No value, initialize.
Create the original - p, err := f.contextComponentFactory.ResourceProvider(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // Create the shadow - real, shadow := newShadowResourceProvider(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.providers.SetValue(uid, &entry) - f.providerKeys = append(f.providerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProviderEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} - -func (f *shadowComponentFactoryShared) ResourceProvisioner( - n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) { - // Determine if we already have a value - raw, ok := f.provisioners.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProvisionerEntry - - // No value, initialize. Create the original - p, err := f.contextComponentFactory.ResourceProvisioner(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // For now, just create a mock since we don't support provisioners yet - real, shadow := newShadowResourceProvisioner(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.provisioners.SetValue(uid, &entry) - f.provisionerKeys = append(f.provisionerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProvisionerEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go deleted file mode 100644 index 5588af2..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go +++ /dev/null @@ -1,158 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/copystructure" -) - -// newShadowContext creates a new context that will shadow the given context -// when walking the graph. The resulting context should be used _only once_ -// for a graph walk. -// -// The returned Shadow should be closed after the graph walk with the -// real context is complete. Errors from the shadow can be retrieved there. -// -// Most importantly, any operations done on the shadow context (the returned -// context) will NEVER affect the real context. All structures are deep -// copied, no real providers or resources are used, etc. 
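// [Editor's aside: an illustrative sketch, not part of the original file.]
// The isolation promised above rests on deep copies. The function below uses
// github.com/mitchellh/copystructure, whose Copy returns an interface{} that
// is type-asserted back to the concrete type:
//
//     raw, err := copystructure.Copy(map[string]interface{}{"region": "us-east-1"})
//     if err != nil {
//         panic(err)
//     }
//     vars := raw.(map[string]interface{}) // independent copy; mutations don't leak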
-func newShadowContext(c *Context) (*Context, *Context, Shadow) { - // Copy the targets - targetRaw, err := copystructure.Copy(c.targets) - if err != nil { - panic(err) - } - - // Copy the variables - varRaw, err := copystructure.Copy(c.variables) - if err != nil { - panic(err) - } - - // Copy the provider inputs - providerInputRaw, err := copystructure.Copy(c.providerInputConfig) - if err != nil { - panic(err) - } - - // The factories - componentsReal, componentsShadow := newShadowComponentFactory(c.components) - - // Create the shadow - shadow := &Context{ - components: componentsShadow, - destroy: c.destroy, - diff: c.diff.DeepCopy(), - hooks: nil, - meta: c.meta, - module: c.module, - state: c.state.DeepCopy(), - targets: targetRaw.([]string), - variables: varRaw.(map[string]interface{}), - - // NOTE(mitchellh): This is not going to work for shadows that are - // testing that input results in the proper end state. At the time - // of writing, input is not used in any state-changing graph - // walks anyways, so this checks nothing. We set it to this to avoid - // any panics but even a "nil" value worked here. - uiInput: new(MockUIInput), - - // Hardcoded to 4 since parallelism in the shadow doesn't matter - // a ton since we're doing far less compared to the real side - // and our operations are MUCH faster. - parallelSem: NewSemaphore(4), - providerInputConfig: providerInputRaw.(map[string]map[string]interface{}), - } - - // Create the real context. This is effectively just a copy of - // the context given except we need to modify some of the values - // to point to the real side of a shadow so the shadow can compare values. - real := &Context{ - // The fields below are changed. - components: componentsReal, - - // The fields below are direct copies - destroy: c.destroy, - diff: c.diff, - // diffLock - no copy - hooks: c.hooks, - meta: c.meta, - module: c.module, - sh: c.sh, - state: c.state, - // stateLock - no copy - targets: c.targets, - uiInput: c.uiInput, - variables: c.variables, - - // l - no copy - parallelSem: c.parallelSem, - providerInputConfig: c.providerInputConfig, - runContext: c.runContext, - runContextCancel: c.runContextCancel, - shadowErr: c.shadowErr, - } - - return real, shadow, &shadowContextCloser{ - Components: componentsShadow, - } -} - -// shadowContextVerify takes the real and shadow context and verifies they -// have equal diffs and states. -func shadowContextVerify(real, shadow *Context) error { - var result error - - // The states compared must be pruned so they're minimal/clean - real.state.prune() - shadow.state.prune() - - // Compare the states - if !real.state.Equal(shadow.state) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow states do not match! "+ - "Real state:\n\n%s\n\n"+ - "Shadow state:\n\n%s\n\n", - real.state, shadow.state)) - } - - // Compare the diffs - if !real.diff.Equal(shadow.diff) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow diffs do not match! "+ - "Real diff:\n\n%s\n\n"+ - "Shadow diff:\n\n%s\n\n", - real.diff, shadow.diff)) - } - - return result -} - -// shadowContextCloser is the io.Closer returned by newShadowContext that -// closes all the shadows and returns the results. -type shadowContextCloser struct { - Components *shadowComponentFactory -} - -// Close closes the shadow context. 
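// [Editor's aside: an illustrative sketch, not part of the original file.]
// Putting newShadowContext, the closer, and shadowContextVerify together.
// walk is a placeholder for running the same graph operation on a context;
// the real walk must finish before the shadow is closed:
//
//     real, shadowCtx, closer := newShadowContext(original)
//     walk(real)      // does the actual work
//     walk(shadowCtx) // replays against recorded values; touches nothing real
//     closer.CloseShadow()
//     if err := closer.ShadowError(); err != nil {
//         log.Printf("[WARN] shadow divergence: %s", err)
//     }
//     if err := shadowContextVerify(real, shadowCtx); err != nil {
//         log.Printf("[WARN] state/diff mismatch: %s", err)
//     }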
-func (c *shadowContextCloser) CloseShadow() error { - return c.Components.CloseShadow() -} - -func (c *shadowContextCloser) ShadowError() error { - err := c.Components.ShadowError() - if err == nil { - return nil - } - - // This is a sad edge case: if the configuration contains uuid() at - // any point, we cannot reason about the shadow execution. Tested - // with Context2Plan_shadowUuid. - if strings.Contains(err.Error(), "uuid()") { - err = nil - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go deleted file mode 100644 index 9741d7e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go +++ /dev/null @@ -1,815 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// shadowResourceProvider implements ResourceProvider for the shadow -// eval context defined in eval_context_shadow.go. -// -// This is used to verify behavior with a real provider. This shouldn't -// be used directly. -type shadowResourceProvider interface { - ResourceProvider - Shadow -} - -// newShadowResourceProvider creates a new shadowed ResourceProvider. -// -// This will assume a well-behaved real ResourceProvider. For example, -// it assumes that the `Resources` call underneath doesn't change values -// since once it is called on the real provider, it will be cached and -// returned in the shadow since number of calls to that shouldn't affect -// actual behavior. -// -// However, with calls like Apply, call order is taken into account, -// parameters are checked for equality, etc. -func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) { - // Create the shared data - shared := shadowResourceProviderShared{} - - // Create the real provider that does actual work - real := &shadowResourceProviderReal{ - ResourceProvider: p, - Shared: &shared, - } - - // Create the shadow that watches the real value - shadow := &shadowResourceProviderShadow{ - Shared: &shared, - - resources: p.Resources(), - dataSources: p.DataSources(), - } - - return real, shadow -} - -// shadowResourceProviderReal is the real resource provider. Function calls -// to this will perform real work. This records the parameters and return -// values and call order for the shadow to reproduce.
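// [Editor's aside: an illustrative sketch, not part of the original file.]
// How the pair returned above is meant to be used: the real half wraps the
// actual provider and records every call into the shared state; the shadow
// half replays those calls and reports parameter mismatches. p and cfg are
// placeholders:
//
//     realP, shadowP := newShadowResourceProvider(p)
//     _ = realP.Configure(cfg)   // runs p.Configure and records cfg + result
//     _ = shadowP.Configure(cfg) // compares cfg against the recording; no real work
//     err := shadowP.ShadowError()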
-type shadowResourceProviderReal struct { - ResourceProvider - - Shared *shadowResourceProviderShared -} - -func (p *shadowResourceProviderReal) Close() error { - var result error - if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok { - result = c.Close() - } - - p.Shared.CloseErr.SetValue(result) - return result -} - -func (p *shadowResourceProviderReal) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - cCopy := c.DeepCopy() - - result, err := p.ResourceProvider.Input(input, c) - p.Shared.Input.SetValue(&shadowResourceProviderInput{ - Config: cCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) { - warns, errs := p.ResourceProvider.Validate(c) - p.Shared.Validate.SetValue(&shadowResourceProviderValidate{ - Config: c.DeepCopy(), - ResultWarn: warns, - ResultErr: errs, - }) - - return warns, errs -} - -func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error { - cCopy := c.DeepCopy() - - err := p.ResourceProvider.Configure(c) - p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{ - Config: cCopy, - Result: err, - }) - - return err -} - -func (p *shadowResourceProviderReal) Stop() error { - return p.ResourceProvider.Stop() -} - -func (p *shadowResourceProviderReal) ValidateResource( - t string, c *ResourceConfig) ([]string, []error) { - key := t - configCopy := c.DeepCopy() - - // Real operation - warns, errs := p.ResourceProvider.ValidateResource(t, c) - - // Initialize to ensure we always have a wrapper with a lock - p.Shared.ValidateResource.Init( - key, &shadowResourceProviderValidateResourceWrapper{}) - - // Get the result - raw := p.Shared.ValidateResource.Value(key) - wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) - if !ok { - // If this fails then we just continue with our day... the shadow - // will fail too, but there isn't much we can do.
- log.Printf( - "[ERROR] unknown value in ValidateResource shadow value: %#v", raw) - return warns, errs - } - - // Lock the wrapper for writing and record our call - wrapper.Lock() - defer wrapper.Unlock() - - wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{ - Config: configCopy, - Warns: warns, - Errors: errs, - }) - - // With it locked, call SetValue again so that it triggers WaitForChange - p.Shared.ValidateResource.SetValue(key, wrapper) - - // Return the result - return warns, errs -} - -func (p *shadowResourceProviderReal) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // These have to be copied before the call since the call can modify them - stateCopy := state.DeepCopy() - diffCopy := diff.DeepCopy() - - result, err := p.ResourceProvider.Apply(info, state, diff) - p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{ - State: stateCopy, - Diff: diffCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - // These have to be copied before the call since the call can modify them - stateCopy := state.DeepCopy() - desiredCopy := desired.DeepCopy() - - result, err := p.ResourceProvider.Diff(info, state, desired) - p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{ - State: stateCopy, - Desired: desiredCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Refresh( - info *InstanceInfo, - state *InstanceState) (*InstanceState, error) { - // These have to be copied before the call since the call can modify them - stateCopy := state.DeepCopy() - - result, err := p.ResourceProvider.Refresh(info, state) - p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{ - State: stateCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) ValidateDataSource( - t string, c *ResourceConfig) ([]string, []error) { - key := t - configCopy := c.DeepCopy() - - // Real operation - warns, errs := p.ResourceProvider.ValidateDataSource(t, c) - - // Initialize - p.Shared.ValidateDataSource.Init( - key, &shadowResourceProviderValidateDataSourceWrapper{}) - - // Get the result - raw := p.Shared.ValidateDataSource.Value(key) - wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) - if !ok { - // If this fails then we just continue with our day... the shadow - // will fail too, but there isn't much we can do.
- log.Printf( - "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw) - return warns, errs - } - - // Lock the wrapper for writing and record our call - wrapper.Lock() - defer wrapper.Unlock() - - wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{ - Config: configCopy, - Warns: warns, - Errors: errs, - }) - - // Set it - p.Shared.ValidateDataSource.SetValue(key, wrapper) - - // Return the result - return warns, errs -} - -func (p *shadowResourceProviderReal) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - // These have to be copied before the call since the call can modify them - desiredCopy := desired.DeepCopy() - - result, err := p.ResourceProvider.ReadDataDiff(info, desired) - p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{ - Desired: desiredCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) ReadDataApply( - info *InstanceInfo, - diff *InstanceDiff) (*InstanceState, error) { - // These have to be copied before the call since the call can modify them - diffCopy := diff.DeepCopy() - - result, err := p.ResourceProvider.ReadDataApply(info, diff) - p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{ - Diff: diffCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -// shadowResourceProviderShadow is the shadow resource provider. Function -// calls never affect real resources. This is paired with the "real" side -// which must be called properly to enable recording. -type shadowResourceProviderShadow struct { - Shared *shadowResourceProviderShared - - // Cached values that are expected to not change - resources []ResourceType - dataSources []DataSource - - Error error // Error is the list of errors from the shadow - ErrorLock sync.Mutex -} - -type shadowResourceProviderShared struct { - // NOTE: Anytime a value is added here, be sure to add it to - // the Close() method so that it is closed.
- - CloseErr shadow.Value - Input shadow.Value - Validate shadow.Value - Configure shadow.Value - ValidateResource shadow.KeyedValue - Apply shadow.KeyedValue - Diff shadow.KeyedValue - Refresh shadow.KeyedValue - ValidateDataSource shadow.KeyedValue - ReadDataDiff shadow.KeyedValue - ReadDataApply shadow.KeyedValue -} - -func (p *shadowResourceProviderShared) Close() error { - return shadow.Close(p) -} - -func (p *shadowResourceProviderShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProviderShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProviderShadow) Resources() []ResourceType { - return p.resources -} - -func (p *shadowResourceProviderShadow) DataSources() []DataSource { - return p.dataSources -} - -func (p *shadowResourceProviderShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProviderShadow) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - // Get the result of the input call - raw := p.Shared.Input.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderInput) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'input' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error { - // Get the result of the call - raw := p.Shared.Configure.Value() - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProviderConfigure) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'configure' shadow value: %#v", raw)) - return nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result -} - -// Stop returns immediately. 
-func (p *shadowResourceProviderShadow) Stop() error { - return nil -} - -func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - // Unique key - key := t - - // Get the initial value - raw := p.Shared.ValidateResource.Value(key) - - // Find a validation with our configuration - var result *shadowResourceProviderValidateResource - for { - // Get the value - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' call for %q:\n\n%#v", - key, c)) - return nil, nil - } - - wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Look for the matching call with our configuration - wrapper.RLock() - for _, call := range wrapper.Calls { - if call.Config.Equal(c) { - result = call - break - } - } - wrapper.RUnlock() - - // If we found a result, exit - if result != nil { - break - } - - // Wait for a change so we can get the wrapper again - raw = p.Shared.ValidateResource.WaitForChange(key) - } - - return result.Warns, result.Errors -} - -func (p *shadowResourceProviderShadow) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Apply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' call for %q:\n\n%#v\n\n%#v", - key, state, diff)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - - if !diff.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Diff, diff)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Diff.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' call for %q:\n\n%#v\n\n%#v", - key, state, desired)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderDiff) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - if !desired.Equal(result.Desired) { - 
p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Diff %q had unequal desired configs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Desired, desired)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Refresh( - info *InstanceInfo, - state *InstanceState) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Refresh.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'refresh' call for %q:\n\n%#v", - key, state)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderRefresh) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'refresh' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ValidateDataSource( - t string, c *ResourceConfig) ([]string, []error) { - // Unique key - key := t - - // Get the initial value - raw := p.Shared.ValidateDataSource.Value(key) - - // Find a validation with our configuration - var result *shadowResourceProviderValidateDataSource - for { - // Get the value - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateDataSource' call for %q:\n\n%#v", - key, c)) - return nil, nil - } - - wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateDataSource' shadow value: %#v", raw)) - return nil, nil - } - - // Look for the matching call with our configuration - wrapper.RLock() - for _, call := range wrapper.Calls { - if call.Config.Equal(c) { - result = call - break - } - } - wrapper.RUnlock() - - // If we found a result, exit - if result != nil { - break - } - - // Wait for a change so we can get the wrapper again - raw = p.Shared.ValidateDataSource.WaitForChange(key) - } - - return result.Warns, result.Errors -} - -func (p *shadowResourceProviderShadow) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.ReadDataDiff.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataDiff' call for %q:\n\n%#v", - key, desired)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderReadDataDiff) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !desired.Equal(result.Desired) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Desired, desired)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ReadDataApply( - info
*InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.ReadDataApply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' call for %q:\n\n%#v", - key, d)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderReadDataApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !d.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - result.Diff, d)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - panic("import not supported by shadow graph") -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. - -type shadowResourceProviderInput struct { - Config *ResourceConfig - Result *ResourceConfig - ResultErr error -} - -type shadowResourceProviderValidate struct { - Config *ResourceConfig - ResultWarn []string - ResultErr []error -} - -type shadowResourceProviderConfigure struct { - Config *ResourceConfig - Result error -} - -type shadowResourceProviderValidateResourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateResource -} - -type shadowResourceProviderValidateResource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderApply struct { - State *InstanceState - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderDiff struct { - State *InstanceState - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderRefresh struct { - State *InstanceState - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderValidateDataSourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateDataSource -} - -type shadowResourceProviderValidateDataSource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderReadDataDiff struct { - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderReadDataApply struct { - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go deleted file mode 100644 index 60a4908..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go +++ /dev/null @@ -1,282 +0,0 @@ -package terraform - -import ( - "fmt" - "io" - "log" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// shadowResourceProvisioner implements ResourceProvisioner for the shadow -// eval context defined in eval_context_shadow.go. -// -// This is used to verify behavior with a real provisioner. This shouldn't -// be used directly. 
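//
// As a rough usage sketch (assuming the surrounding package's types, a
// concrete ResourceProvisioner p, and a hypothetical cfg *ResourceConfig),
// the pair returned by newShadowResourceProvisioner below could be driven
// like this:
//
//	real, shadowSide := newShadowResourceProvisioner(p)
//	_, _ = real.Validate(cfg)       // does the real work and records the call
//	_, _ = shadowSide.Validate(cfg) // replays the recorded result; no side effects
//	if err := shadowSide.ShadowError(); err != nil {
//		// any divergence between the real and shadow calls is reported here
//	}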
-type shadowResourceProvisioner interface { - ResourceProvisioner - Shadow -} - -// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner. -func newShadowResourceProvisioner( - p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) { - // Create the shared data - shared := shadowResourceProvisionerShared{ - Validate: shadow.ComparedValue{ - Func: shadowResourceProvisionerValidateCompare, - }, - } - - // Create the real provisioner that does actual work - real := &shadowResourceProvisionerReal{ - ResourceProvisioner: p, - Shared: &shared, - } - - // Create the shadow that watches the real value - shadow := &shadowResourceProvisionerShadow{ - Shared: &shared, - } - - return real, shadow -} - -// shadowResourceProvisionerReal is the real resource provisioner. Function calls -// to this will perform real work. This records the parameters and return -// values and call order for the shadow to reproduce. -type shadowResourceProvisionerReal struct { - ResourceProvisioner - - Shared *shadowResourceProvisionerShared -} - -func (p *shadowResourceProvisionerReal) Close() error { - var result error - if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok { - result = c.Close() - } - - p.Shared.CloseErr.SetValue(result) - return result -} - -func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) { - warns, errs := p.ResourceProvisioner.Validate(c) - p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{ - Config: c, - ResultWarn: warns, - ResultErr: errs, - }) - - return warns, errs -} - -func (p *shadowResourceProvisionerReal) Apply( - output UIOutput, s *InstanceState, c *ResourceConfig) error { - err := p.ResourceProvisioner.Apply(output, s, c) - - // Write the result, grab a lock for writing. This should never - // block long since the operations below don't block. - p.Shared.ApplyLock.Lock() - defer p.Shared.ApplyLock.Unlock() - - key := s.ID - raw, ok := p.Shared.Apply.ValueOk(key) - if !ok { - // Setup a new value - raw = &shadow.ComparedValue{ - Func: shadowResourceProvisionerApplyCompare, - } - - // Set it - p.Shared.Apply.SetValue(key, raw) - } - - compareVal, ok := raw.(*shadow.ComparedValue) - if !ok { - // Just log and return so that we don't cause the real side - // any side effects. - log.Printf("[ERROR] unknown value in 'apply': %#v", raw) - return err - } - - // Write the resulting value - compareVal.SetValue(&shadowResourceProvisionerApply{ - Config: c, - ResultErr: err, - }) - - return err -} - -func (p *shadowResourceProvisionerReal) Stop() error { - return p.ResourceProvisioner.Stop() -} - -// shadowResourceProvisionerShadow is the shadow resource provisioner. Function -// calls never affect real resources. This is paired with the "real" side -// which must be called properly to enable recording. -type shadowResourceProvisionerShadow struct { - Shared *shadowResourceProvisionerShared - - Error error // Error is the list of errors from the shadow - ErrorLock sync.Mutex -} - -type shadowResourceProvisionerShared struct { - // NOTE: Anytime a value is added here, be sure to add it to - // the Close() method so that it is closed.
- - CloseErr shadow.Value - Validate shadow.ComparedValue - Apply shadow.KeyedValue - ApplyLock sync.Mutex // For writing only -} - -func (p *shadowResourceProvisionerShared) Close() error { - closers := []io.Closer{ - &p.CloseErr, - } - - for _, c := range closers { - // This should never happen, but we don't panic because a panic - // could affect the real behavior of Terraform and a shadow should - // never be able to do that. - if err := c.Close(); err != nil { - return err - } - } - - return nil -} - -func (p *shadowResourceProvisionerShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProvisionerShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProvisionerShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value(c) - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProvisionerValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // We don't need to compare configurations because we key on the - // configuration so just return right away. - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Apply( - output UIOutput, s *InstanceState, c *ResourceConfig) error { - // Get the value based on the key - key := s.ID - raw := p.Shared.Apply.Value(key) - if raw == nil { - return nil - } - - compareVal, ok := raw.(*shadow.ComparedValue) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - // With the compared value, we compare against our config - raw = compareVal.Value(c) - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProvisionerApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - return result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Stop() error { - // For the shadow, we always just return nil since a Stop indicates - // that we were interrupted and shadows are disabled during interrupts - // anyways. - return nil -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. 
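//
// As a small illustrative sketch (assuming the surrounding package's imports
// and a hypothetical cfg *ResourceConfig), the shadow.ComparedValue plumbing
// pairs a caller-supplied key with a recorded call via the compare functions
// defined below:
//
//	v := &shadow.ComparedValue{Func: shadowResourceProvisionerValidateCompare}
//	// Real side: record the call, keyed by its configuration.
//	v.SetValue(&shadowResourceProvisionerValidate{Config: cfg})
//	// Shadow side: blocks until a recorded call with an equal config exists.
//	raw := v.Value(cfg)
//	result := raw.(*shadowResourceProvisionerValidate)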
- -type shadowResourceProvisionerValidate struct { - Config *ResourceConfig - ResultWarn []string - ResultErr []error -} - -type shadowResourceProvisionerApply struct { - Config *ResourceConfig - ResultErr error -} - -func shadowResourceProvisionerValidateCompare(k, v interface{}) bool { - c, ok := k.(*ResourceConfig) - if !ok { - return false - } - - result, ok := v.(*shadowResourceProvisionerValidate) - if !ok { - return false - } - - return c.Equal(result.Config) -} - -func shadowResourceProvisionerApplyCompare(k, v interface{}) bool { - c, ok := k.(*ResourceConfig) - if !ok { - return false - } - - result, ok := v.(*shadowResourceProvisionerApply) - if !ok { - return false - } - - return c.Equal(result.Config) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go deleted file mode 100644 index 0c46194..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ /dev/null @@ -1,2136 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/config" - "github.com/mitchellh/copystructure" - "github.com/satori/go.uuid" -) - -const ( - // StateVersion is the current version for our state file - StateVersion = 3 -) - -// rootModulePath is the path of the root module -var rootModulePath = []string{"root"} - -// normalizeModulePath takes a raw module path and returns a path that -// has the rootModulePath prepended to it. If I could go back in time I -// would've never had a rootModulePath (empty path would be root). We can -// still fix this but that's a big refactor that my branch doesn't make sense -// for. Instead, this function normalizes paths. -func normalizeModulePath(p []string) []string { - k := len(rootModulePath) - - // If we already have a root module prefix, we're done - if len(p) >= len(rootModulePath) { - if reflect.DeepEqual(p[:k], rootModulePath) { - return p - } - } - - // None? Prefix it - result := make([]string, len(rootModulePath)+len(p)) - copy(result, rootModulePath) - copy(result[k:], p) - return result -} - -// State keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real-world resources it is actually -// managing. -type State struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *RemoteState `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state.
This is used to track any changes in the backend - // configuration. - Backend *BackendState `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*ModuleState `json:"modules"` - - mu sync.Mutex -} - -func (s *State) Lock() { s.mu.Lock() } -func (s *State) Unlock() { s.mu.Unlock() } - -// NewState is used to initialize a blank state -func NewState() *State { - s := &State{} - s.init() - return s -} - -// Children returns the ModuleStates that are direct children of -// the given path. If the path is "root", for example, then children -// returned might be "root.child", but not "root.child.grandchild". -func (s *State) Children(path []string) []*ModuleState { - s.Lock() - defer s.Unlock() - // TODO: test - - return s.children(path) -} - -func (s *State) children(path []string) []*ModuleState { - result := make([]*ModuleState, 0) - for _, m := range s.Modules { - if m == nil { - continue - } - - if len(m.Path) != len(path)+1 { - continue - } - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - result = append(result, m) - } - - return result -} - -// AddModule adds the module with the given path to the state. -// -// This should be the preferred method to add module states since it -// allows us to optimize lookups later as well as control sorting. -func (s *State) AddModule(path []string) *ModuleState { - s.Lock() - defer s.Unlock() - - return s.addModule(path) -} - -func (s *State) addModule(path []string) *ModuleState { - // check if the module exists first - m := s.moduleByPath(path) - if m != nil { - return m - } - - m = &ModuleState{Path: path} - m.init() - s.Modules = append(s.Modules, m) - s.sort() - return m -} - -// ModuleByPath is used to lookup the module state for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (s *State) ModuleByPath(path []string) *ModuleState { - if s == nil { - return nil - } - s.Lock() - defer s.Unlock() - - return s.moduleByPath(path) -} - -func (s *State) moduleByPath(path []string) *ModuleState { - for _, mod := range s.Modules { - if mod == nil { - continue - } - if mod.Path == nil { - panic("missing module path") - } - if reflect.DeepEqual(mod.Path, path) { - return mod - } - } - return nil -} - -// ModuleOrphans returns all the module orphans in this state by -// returning their full paths. These paths can be used with ModuleByPath -// to return the actual state. -func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string { - s.Lock() - defer s.Unlock() - - return s.moduleOrphans(path, c) - -} - -func (s *State) moduleOrphans(path []string, c *config.Config) [][]string { - // direct keeps track of what direct children we have both in our config - // and in our state. childrenKeys keeps track of what isn't an orphan. - direct := make(map[string]struct{}) - childrenKeys := make(map[string]struct{}) - if c != nil { - for _, m := range c.Modules { - childrenKeys[m.Name] = struct{}{} - direct[m.Name] = struct{}{} - } - } - - // Go over the direct children and find any that aren't in our keys. - var orphans [][]string - for _, m := range s.children(path) { - key := m.Path[len(m.Path)-1] - - // Record that we found this key as a direct child. We use this - // later to find orphan nested modules. 
- direct[key] = struct{}{} - - // If we have a direct child still in our config, it is not an orphan - if _, ok := childrenKeys[key]; ok { - continue - } - - orphans = append(orphans, m.Path) - } - - // Find the orphans that are nested... - for _, m := range s.Modules { - if m == nil { - continue - } - - // We only want modules that are at least grandchildren - if len(m.Path) < len(path)+2 { - continue - } - - // If it isn't part of our tree, continue - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - // If we have the direct child, then just skip it. - key := m.Path[len(path)] - if _, ok := direct[key]; ok { - continue - } - - orphanPath := m.Path[:len(path)+1] - - // Don't double-add if we've already added this orphan (which can happen if - // there are multiple nested sub-modules that get orphaned together). - alreadyAdded := false - for _, o := range orphans { - if reflect.DeepEqual(o, orphanPath) { - alreadyAdded = true - break - } - } - if alreadyAdded { - continue - } - - // Add this orphan - orphans = append(orphans, orphanPath) - } - - return orphans -} - -// Empty returns true if the state is empty. -func (s *State) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return len(s.Modules) == 0 -} - -// HasResources returns true if the state contains any resources. -// -// This is similar to !s.Empty, but returns true also in the case where the -// state has modules but all of them are devoid of resources. -func (s *State) HasResources() bool { - if s.Empty() { - return false - } - - for _, mod := range s.Modules { - if len(mod.Resources) > 0 { - return true - } - } - - return false -} - -// IsRemote returns true if State represents a state that exists and is -// remote. -func (s *State) IsRemote() bool { - if s == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Remote == nil { - return false - } - if s.Remote.Type == "" { - return false - } - - return true -} - -// Validate validates the integrity of this state file. -// -// Certain properties of the statefile are expected by Terraform in order -// to behave properly. The core of Terraform will assume that once it -// receives a State structure that it has been validated. This validation -// check should be called to ensure that. -// -// If this returns an error, then the user should be notified. The error -// response will include detailed information on the nature of the error. -func (s *State) Validate() error { - s.Lock() - defer s.Unlock() - - var result error - - // !!!! FOR DEVELOPERS !!!! - // - // Any errors returned from this Validate function will BLOCK TERRAFORM - // from loading a state file. Therefore, this should only contain checks - // that are only resolvable through manual intervention. - // - // !!!! FOR DEVELOPERS !!!! - - // Make sure there are no duplicate module states. We open a new - // block here so we can use basic variable names and future validations - // can do the same. - { - found := make(map[string]struct{}) - for _, ms := range s.Modules { - if ms == nil { - continue - } - - key := strings.Join(ms.Path, ".") - if _, ok := found[key]; ok { - result = multierror.Append(result, fmt.Errorf( - strings.TrimSpace(stateValidateErrMultiModule), key)) - continue - } - - found[key] = struct{}{} - } - } - - return result -} - -// Remove removes the item in the state at the given address, returning -// any errors that may have occurred. -// -// If the address references a module state or resource, it will delete -// all children as well. 
To check what will be deleted, use a StateFilter -// first. -func (s *State) Remove(addr ...string) error { - s.Lock() - defer s.Unlock() - - // Filter out what we need to delete - filter := &StateFilter{State: s} - results, err := filter.Filter(addr...) - if err != nil { - return err - } - - // If we have no results, just exit early; we're not going to do anything. - // While what happens below is fairly fast, this is an important early - // exit since the prune below might modify the state more and we don't - // want to modify the state if we don't have to. - if len(results) == 0 { - return nil - } - - // Go through each result and grab what we need - removed := make(map[interface{}]struct{}) - for _, r := range results { - // Convert the path to our own type - path := append([]string{"root"}, r.Path...) - - // If we removed this already, then ignore - if _, ok := removed[r.Value]; ok { - continue - } - - // If we removed the parent already, then ignore - if r.Parent != nil { - if _, ok := removed[r.Parent.Value]; ok { - continue - } - } - - // Add this to the removed list - removed[r.Value] = struct{}{} - - switch v := r.Value.(type) { - case *ModuleState: - s.removeModule(path, v) - case *ResourceState: - s.removeResource(path, v) - case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) - default: - return fmt.Errorf("unknown type to delete: %T", r.Value) - } - } - - // Prune since the removal functions often do the bare minimum to - // remove a thing and may leave around dangling empty modules, resources, - // etc. Prune will clean that all up. - s.prune() - - return nil -} - -func (s *State) removeModule(path []string, v *ModuleState) { - for i, m := range s.Modules { - if m == v { - s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil - return - } - } -} - -func (s *State) removeResource(path []string, v *ResourceState) { - // Get the module this resource lives in. If it doesn't exist, we're done. - mod := s.moduleByPath(path) - if mod == nil { - return - } - - // Find this resource. This is an O(N) lookup; if we had the key - // it could be O(1), but even with thousands of resources this shouldn't - // matter right now. We can easily improve performance here when the time comes. - for k, r := range mod.Resources { - if r == v { - // Found it - delete(mod.Resources, k) - return - } - } -} - -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { - // Go through the resource and find the instance that matches this - // (if any) and remove it. - - // Check primary - if r.Primary == v { - r.Primary = nil - return - } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } -} - -// RootModule returns the ModuleState for the root module -func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(rootModulePath) - if root == nil { - panic("missing root module") - } - return root -} - -// Equal tests if one state is equal to another.
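//
// For example (a minimal sketch): Equal compares the Version and the module
// contents, but not Serial, so bumping the serial alone does not make two
// states unequal:
//
//	a := NewState()
//	b := a.DeepCopy()
//	b.Serial++
//	_ = a.Equal(b) // true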
-func (s *State) Equal(other *State) bool { - // If one is nil, we do a direct check - if s == nil || other == nil { - return s == other - } - - s.Lock() - defer s.Unlock() - return s.equal(other) -} - -func (s *State) equal(other *State) bool { - if s == nil || other == nil { - return s == other - } - - // If the versions are different, they're certainly not equal - if s.Version != other.Version { - return false - } - - // If any of the modules are not equal, then this state isn't equal - if len(s.Modules) != len(other.Modules) { - return false - } - for _, m := range s.Modules { - // This isn't very optimal currently but works. - otherM := other.moduleByPath(m.Path) - if otherM == nil { - return false - } - - // If they're not equal, then we're not equal! - if !m.Equal(otherM) { - return false - } - } - - return true -} - -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. -// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. -func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - -type StateAgeComparison int - -const ( - StateAgeEqual StateAgeComparison = 0 - StateAgeReceiverNewer StateAgeComparison = 1 - StateAgeReceiverOlder StateAgeComparison = -1 -) - -// CompareAges compares one state with another for which is "older". -// -// This is a simple check using the state's serial, and is thus only as -// reliable as the serial itself. In the normal case, only one state -// exists for a given combination of lineage/serial, but Terraform -// does not guarantee this and so the result of this method should be -// used with care. -// -// Returns an integer that is negative if the receiver is older than -// the argument, positive if the converse, and zero if they are equal. -// An error is returned if the two states are not of the same lineage, -// in which case the integer returned has no meaning. 
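//
// A brief sketch (lineage and serial values are illustrative):
//
//	older := &State{Lineage: "example-lineage", Serial: 3}
//	newer := &State{Lineage: "example-lineage", Serial: 5}
//	cmp, err := older.CompareAges(newer)
//	// err == nil, cmp == StateAgeReceiverOlder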
-func (s *State) CompareAges(other *State) (StateAgeComparison, error) { - // nil states are "older" than actual states - switch { - case s != nil && other == nil: - return StateAgeReceiverNewer, nil - case s == nil && other != nil: - return StateAgeReceiverOlder, nil - case s == nil && other == nil: - return StateAgeEqual, nil - } - - if !s.SameLineage(other) { - return StateAgeEqual, fmt.Errorf( - "can't compare two states of differing lineage", - ) - } - - s.Lock() - defer s.Unlock() - - switch { - case s.Serial < other.Serial: - return StateAgeReceiverOlder, nil - case s.Serial > other.Serial: - return StateAgeReceiverNewer, nil - default: - return StateAgeEqual, nil - } -} - -// SameLineage returns true only if the state given as an argument belongs -// to the same "lineage" of states as the receiver. -func (s *State) SameLineage(other *State) bool { - s.Lock() - defer s.Unlock() - - // If one of the states has no lineage then it is assumed to predate - // this concept, and so we'll accept it as belonging to any lineage - // so that a lineage string can be assigned to newer versions - // without breaking compatibility with older versions. - if s.Lineage == "" || other.Lineage == "" { - return true - } - - return s.Lineage == other.Lineage -} - -// DeepCopy performs a deep copy of the state structure and returns -// a new structure. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*State) -} - -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. -func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return SemVersion.LessThan(v) -} - -func (s *State) Init() { - s.Lock() - defer s.Unlock() - s.init() -} - -func (s *State) init() { - if s.Version == 0 { - s.Version = StateVersion - } - - if s.moduleByPath(rootModulePath) == nil { - s.addModule(rootModulePath) - } - s.ensureHasLineage() - - for _, mod := range s.Modules { - if mod != nil { - mod.init() - } - } - - if s.Remote != nil { - s.Remote.init() - } - -} - -func (s *State) EnsureHasLineage() { - s.Lock() - defer s.Unlock() - - s.ensureHasLineage() -} - -func (s *State) ensureHasLineage() { - if s.Lineage == "" { - s.Lineage = uuid.NewV4().String() - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) - } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) - } -} - -// AddModuleState inserts this module state, overriding any existing ModuleState -func (s *State) AddModuleState(mod *ModuleState) { - mod.init() - s.Lock() - defer s.Unlock() - - s.addModuleState(mod) -} - -func (s *State) addModuleState(mod *ModuleState) { - for i, m := range s.Modules { - if reflect.DeepEqual(m.Path, mod.Path) { - s.Modules[i] = mod - return - } - } - - s.Modules = append(s.Modules, mod) - s.sort() -} - -// prune is used to remove any resources that are no longer required -func (s *State) prune() { - if s == nil { - return - } - - // Filter out empty modules. - // A module is always assumed to have a path, and its length isn't always - // bounds checked later on. Modules may be "emptied" during destroy, but we - // never want to store those in the state.
- for i := 0; i < len(s.Modules); i++ { - if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { - s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) - i-- - } - } - - for _, mod := range s.Modules { - mod.prune() - } - if s.Remote != nil && s.Remote.Empty() { - s.Remote = nil - } -} - -// sort sorts the modules -func (s *State) sort() { - sort.Sort(moduleStateSort(s.Modules)) - - // Allow modules to be sorted - for _, m := range s.Modules { - if m != nil { - m.sort() - } - } -} - -func (s *State) String() string { - if s == nil { - return "" - } - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - for _, m := range s.Modules { - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// BackendState stores the configuration to connect to a remote backend. -type BackendState struct { - Type string `json:"type"` // Backend type - Config map[string]interface{} `json:"config"` // Backend raw config - - // Hash is the hash code to uniquely identify the original source - // configuration. We use this to detect when there is a change in - // configuration even when "type" isn't changed. - Hash uint64 `json:"hash"` -} - -// Empty returns true if BackendState has no state. -func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Rehash returns a unique content hash for this backend's configuration -// as a uint64 value. -// The Hash stored in the backend state needs to match the config itself, but -// we need to compare the backend config after it has been combined with all -// options. -// This function must match the implementation used by config.Backend. -func (s *BackendState) Rehash() uint64 { - if s == nil { - return 0 - } - - cfg := config.Backend{ - Type: s.Type, - RawConfig: &config.RawConfig{ - Raw: s.Config, - }, - } - - return cfg.Rehash() -} - -// RemoteState is used to track the information about a remote -// state store that we push/pull state to. 
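//
// For example (illustrative values only; "s3" stands in for any supported
// remote state type):
//
//	remote := &RemoteState{
//		Type:   "s3",
//		Config: map[string]string{"bucket": "tf-state", "key": "prod.tfstate"},
//	}
//	_ = remote.Empty() // false, since Type is set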
-type RemoteState struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` - - mu sync.Mutex -} - -func (s *RemoteState) Lock() { s.mu.Lock() } -func (s *RemoteState) Unlock() { s.mu.Unlock() } - -func (r *RemoteState) init() { - r.Lock() - defer r.Unlock() - - if r.Config == nil { - r.Config = make(map[string]string) - } -} - -func (r *RemoteState) deepcopy() *RemoteState { - r.Lock() - defer r.Unlock() - - confCopy := make(map[string]string, len(r.Config)) - for k, v := range r.Config { - confCopy[k] = v - } - return &RemoteState{ - Type: r.Type, - Config: confCopy, - } -} - -func (r *RemoteState) Empty() bool { - if r == nil { - return true - } - r.Lock() - defer r.Unlock() - - return r.Type == "" -} - -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - -// OutputState is used to track the state relevant to a single output. -type OutputState struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` - - mu sync.Mutex -} - -func (s *OutputState) Lock() { s.mu.Lock() } -func (s *OutputState) Unlock() { s.mu.Unlock() } - -func (s *OutputState) String() string { - return fmt.Sprintf("%#v", s.Value) -} - -// Equal compares two OutputState structures for equality. nil values are -// considered equal. -func (s *OutputState) Equal(other *OutputState) bool { - if s == nil && other == nil { - return true - } - - if s == nil || other == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Sensitive != other.Sensitive { - return false - } - - if !reflect.DeepEqual(s.Value, other.Value) { - return false - } - - return true -} - -func (s *OutputState) deepcopy() *OutputState { - if s == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(fmt.Errorf("Error copying output value: %s", err)) - } - - return stateCopy.(*OutputState) -} - -// ModuleState is used to track all the state relevant to a single -// module. Prior to Terraform 0.3, all state belonged to the "root" -// module. -type ModuleState struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*OutputState `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case.
- Resources map[string]*ResourceState `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - mu sync.Mutex -} - -func (s *ModuleState) Lock() { s.mu.Lock() } -func (s *ModuleState) Unlock() { s.mu.Unlock() } - -// Equal tests whether one module state is equal to another. -func (m *ModuleState) Equal(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - // Paths must be equal - if !reflect.DeepEqual(m.Path, other.Path) { - return false - } - - // Outputs must be equal - if len(m.Outputs) != len(other.Outputs) { - return false - } - for k, v := range m.Outputs { - if !other.Outputs[k].Equal(v) { - return false - } - } - - // Dependencies must be equal. This sorts these in place but - // this shouldn't cause any problems. - sort.Strings(m.Dependencies) - sort.Strings(other.Dependencies) - if len(m.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range m.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // Resources must be equal - if len(m.Resources) != len(other.Resources) { - return false - } - for k, r := range m.Resources { - otherR, ok := other.Resources[k] - if !ok { - return false - } - - if !r.Equal(otherR) { - return false - } - } - - return true -} - -// IsRoot says whether or not this module state is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendent of this module. -func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *config.Config) []string { - m.Lock() - defer m.Unlock() - - keys := make(map[string]struct{}) - for k, _ := range m.Resources { - keys[k] = struct{}{} - } - - if c != nil { - for _, r := range c.Resources { - delete(keys, r.Id()) - - for k, _ := range keys { - if strings.HasPrefix(k, r.Id()+".") { - delete(keys, k) - } - } - } - } - - result := make([]string, 0, len(keys)) - for k, _ := range keys { - result = append(result, k) - } - - return result -} - -// View returns a view with the given resource prefix.
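//
// A small sketch (resource keys are illustrative; mod is a hypothetical
// *ModuleState): given resources "aws_instance.web" and "aws_instance.web.0",
// View("aws_instance.web") keeps both, while an unrelated key such as
// "aws_instance.db" is dropped from the returned copy:
//
//	v := mod.View("aws_instance.web")
//	_, ok := v.Resources["aws_instance.db"] // ok == false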
-func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k, _ := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - -func (m *ModuleState) init() { - m.Lock() - defer m.Unlock() - - if m.Path == nil { - m.Path = []string{} - } - if m.Outputs == nil { - m.Outputs = make(map[string]*OutputState) - } - if m.Resources == nil { - m.Resources = make(map[string]*ResourceState) - } - - if m.Dependencies == nil { - m.Dependencies = make([]string, 0) - } - - for _, rs := range m.Resources { - rs.init() - } -} - -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - -// prune is used to remove any resources that are no longer required -func (m *ModuleState) prune() { - m.Lock() - defer m.Unlock() - - for k, v := range m.Resources { - if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { - delete(m.Resources, k) - continue - } - - v.prune() - } - - for k, v := range m.Outputs { - if v.Value == config.UnknownVariableValue { - delete(m.Outputs, k) - } - } - - m.Dependencies = uniqueStrings(m.Dependencies) -} - -func (m *ModuleState) sort() { - for _, v := range m.Resources { - v.sort() - } -} - -func (m *ModuleState) String() string { - m.Lock() - defer m.Unlock() - - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("<no state>") - } - - names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { - names = append(names, name) - } - - sort.Sort(resourceNameSort(names)) - - for _, k := range names { - rs := m.Resources[k] - var id string - if rs.Primary != nil { - id = rs.Primary.ID - } - if id == "" { - id = "<not created>" - } - - taintStr := "" - if rs.Primary.Tainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(rs.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - if rs.Provider != "" { - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) - } - - var attributes map[string]string - if rs.Primary != nil { - attributes = rs.Primary.Attributes - } - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - for idx, t := range rs.Deposed { - taintStr := "" - if t.Tainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) - } - - if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range rs.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep)) - } - } - } - - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.Outputs)) - for k, _ := range m.Outputs { - ks = append(ks, k) - } - - sort.Strings(ks) - - for _, k := range ks { - v := m.Outputs[k] - switch vTyped := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key, _ := range
vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - } - } - } - - return buf.String() -} - -// ResourceStateKey is a structured representation of the key used for the -// ModuleState.Resources mapping -type ResourceStateKey struct { - Name string - Type string - Mode config.ResourceMode - Index int -} - -// Equal determines whether two ResourceStateKeys are the same -func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { - if rsk == nil || other == nil { - return false - } - if rsk.Mode != other.Mode { - return false - } - if rsk.Type != other.Type { - return false - } - if rsk.Name != other.Name { - return false - } - if rsk.Index != other.Index { - return false - } - return true -} - -func (rsk *ResourceStateKey) String() string { - if rsk == nil { - return "" - } - var prefix string - switch rsk.Mode { - case config.ManagedResourceMode: - prefix = "" - case config.DataResourceMode: - prefix = "data." - default: - panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) - } - if rsk.Index == -1 { - return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) - } - return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) -} - -// ParseResourceStateKey accepts a key in the format used by -// ModuleState.Resources and returns a resource name and resource index. In the -// state, a resource has the format "type.name.index" or "type.name". In the -// latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { - parts := strings.Split(k, ".") - mode := config.ManagedResourceMode - if len(parts) > 0 && parts[0] == "data" { - mode = config.DataResourceMode - // Don't need the constant "data" prefix for parsing - // now that we've figured out the mode. - parts = parts[1:] - } - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("Malformed resource state key: %s", k) - } - rsk := &ResourceStateKey{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Index: -1, - } - if len(parts) == 3 { - index, err := strconv.Atoi(parts[2]) - if err != nil { - return nil, fmt.Errorf("Malformed resource state key index: %s", k) - } - rsk.Index = index - } - return rsk, nil -} - -// ResourceState holds the state of a resource that is used so that -// a provider can find and manage an existing resource as well as for -// storing attributes that are used to populate variables of child -// resources. -// -// Attributes has attributes about the created resource that are -// queryable in interpolation: "${type.id.attr}" -// -// Extra is just extra data that a provider can return that we store -// for later, but is not exposed in any way to the user. -// -type ResourceState struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. 
- // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *InstanceState `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*InstanceState `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -func (s *ResourceState) Lock() { s.mu.Lock() } -func (s *ResourceState) Unlock() { s.mu.Unlock() } - -// Equal tests whether two ResourceStates are equal. -func (s *ResourceState) Equal(other *ResourceState) bool { - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Provider != other.Provider { - return false - } - - // Dependencies must be equal - sort.Strings(s.Dependencies) - sort.Strings(other.Dependencies) - if len(s.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range s.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true -} - -// Taint marks a resource as tainted. -func (s *ResourceState) Taint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = true - } -} - -// Untaint unmarks a resource as tainted.
-func (s *ResourceState) Untaint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = false - } -} - -func (s *ResourceState) init() { - s.Lock() - defer s.Unlock() - - if s.Primary == nil { - s.Primary = &InstanceState{} - } - s.Primary.init() - - if s.Dependencies == nil { - s.Dependencies = []string{} - } - - if s.Deposed == nil { - s.Deposed = make([]*InstanceState, 0) - } -} - -func (s *ResourceState) deepcopy() *ResourceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*ResourceState) -} - -// prune is used to remove any instances that are no longer required -func (s *ResourceState) prune() { - s.Lock() - defer s.Unlock() - - n := len(s.Deposed) - for i := 0; i < n; i++ { - inst := s.Deposed[i] - if inst == nil || inst.ID == "" { - copy(s.Deposed[i:], s.Deposed[i+1:]) - s.Deposed[n-1] = nil - n-- - i-- - } - } - s.Deposed = s.Deposed[:n] - - s.Dependencies = uniqueStrings(s.Dependencies) -} - -func (s *ResourceState) sort() { - s.Lock() - defer s.Unlock() - - sort.Strings(s.Dependencies) -} - -func (s *ResourceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) - return buf.String() -} - -// InstanceState is used to track the unique state information belonging -// to a given instance. -type InstanceState struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral EphemeralState `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. 
- Tainted bool `json:"tainted"` - - mu sync.Mutex -} - -func (s *InstanceState) Lock() { s.mu.Lock() } -func (s *InstanceState) Unlock() { s.mu.Unlock() } - -func (s *InstanceState) init() { - s.Lock() - defer s.Unlock() - - if s.Attributes == nil { - s.Attributes = make(map[string]string) - } - if s.Meta == nil { - s.Meta = make(map[string]interface{}) - } - s.Ephemeral.init() -} - -// Copy all the Fields from another InstanceState -func (s *InstanceState) Set(from *InstanceState) { - s.Lock() - defer s.Unlock() - - from.Lock() - defer from.Unlock() - - s.ID = from.ID - s.Attributes = from.Attributes - s.Ephemeral = from.Ephemeral - s.Meta = from.Meta - s.Tainted = from.Tainted -} - -func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*InstanceState) -} - -func (s *InstanceState) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return s.ID == "" -} - -func (s *InstanceState) Equal(other *InstanceState) bool { - // Short circuit some nil checks - if s == nil || other == nil { - return s == other - } - s.Lock() - defer s.Unlock() - - // IDs must be equal - if s.ID != other.ID { - return false - } - - // Attributes must be equal - if len(s.Attributes) != len(other.Attributes) { - return false - } - for k, v := range s.Attributes { - otherV, ok := other.Attributes[k] - if !ok { - return false - } - - if v != otherV { - return false - } - } - - // Meta must be equal - if len(s.Meta) != len(other.Meta) { - return false - } - if s.Meta != nil && other.Meta != nil { - // We only do the deep check if both are non-nil. If one is nil - // we treat it as equal since their lengths are both zero (check - // above). - if !reflect.DeepEqual(s.Meta, other.Meta) { - return false - } - } - - if s.Tainted != other.Tainted { - return false - } - - return true -} - -// MergeDiff takes a ResourceDiff and merges the attributes into -// this resource state in order to generate a new state. This new -// state can be used to provide updated attribute lookups for -// variable interpolation. -// -// If the diff attribute requires computing the value, and hence -// won't be available until apply, the value is replaced with the -// computeID. 
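The merge rules just described are compact enough to sketch in isolation. This is a simplified, hypothetical rendering, not the deleted method itself: attrDiff and the "<computed>" placeholder stand in for the real attribute-diff type and config.UnknownVariableValue. The deleted implementation follows.

package main

import "fmt"

// unknown stands in for the deleted config.UnknownVariableValue.
const unknown = "<computed>"

// attrDiff is a hypothetical stand-in for the parts of an attribute
// diff that the merge consults.
type attrDiff struct {
	New         string
	NewComputed bool
	NewRemoved  bool
}

// mergeAttrs applies the rules described above: removed attributes are
// deleted, computed attributes become the unknown placeholder, and
// everything else takes the diff's new value.
func mergeAttrs(state map[string]string, diff map[string]attrDiff) map[string]string {
	out := make(map[string]string, len(state))
	for k, v := range state {
		out[k] = v
	}
	for k, d := range diff {
		switch {
		case d.NewRemoved:
			delete(out, k)
		case d.NewComputed:
			out[k] = unknown
		default:
			out[k] = d.New
		}
	}
	return out
}

func main() {
	state := map[string]string{"ami": "ami-1", "tag": "old"}
	diff := map[string]attrDiff{
		"tag": {New: "new"},        // plain update
		"ip":  {NewComputed: true}, // not known until apply
		"ami": {NewRemoved: true},  // removed from state
	}
	fmt.Println(mergeAttrs(state, diff)) // map[ip:<computed> tag:new]
}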
-func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { - result := s.DeepCopy() - if result == nil { - result = new(InstanceState) - } - result.init() - - if s != nil { - s.Lock() - defer s.Unlock() - for k, v := range s.Attributes { - result.Attributes[k] = v - } - } - if d != nil { - for k, diff := range d.CopyAttributes() { - if diff.NewRemoved { - delete(result.Attributes, k) - continue - } - if diff.NewComputed { - result.Attributes[k] = config.UnknownVariableValue - continue - } - - result.Attributes[k] = diff.New - } - } - - return result -} - -func (s *InstanceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - - if s == nil || s.ID == "" { - return "" - } - - buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) - - attributes := s.Attributes - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) - } - - buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) - - return buf.String() -} - -// EphemeralState is used for transient state that is only kept in-memory -type EphemeralState struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` - - // Type is used to specify the resource type for this instance. This is only - // required for import operations (as documented). If the documentation - // doesn't state that you need to set this, then don't worry about - // setting it. - Type string `json:"-"` -} - -func (e *EphemeralState) init() { - if e.ConnInfo == nil { - e.ConnInfo = make(map[string]string) - } -} - -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. -func ReadState(src io.Reader) (*State, error) { - buf := bufio.NewReader(src) - if _, err := buf.Peek(1); err != nil { - // the error is either io.EOF or "invalid argument", and both are from - // an empty state. - return nil, ErrNoState - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. 
- // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - panic("resulting state in load not set, assertion failed") - } - - // Prune the state when we read it. It's possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). - result.prune() - - // Validate that the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version; this is to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "likely means that the state has become corrupted through\n"+ - "some external means.
Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any uninitialized fields in the state - state.init() - - // Sort it - state.sort() - - return state, nil -} - -func ReadStateV3(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version; this is to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "likely means that the state has become corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any uninitialized fields in the state - state.init() - - // Sort it - state.sort() - - // Now we write the state back out to detect any changes in normalization. - // If our state is now written out differently, bump the serial number to - // prevent conflicts. - var buf bytes.Buffer - err := WriteState(state, &buf) - if err != nil { - return nil, err - } - - if !bytes.Equal(jsonBytes, buf.Bytes()) { - log.Println("[INFO] state modified during read or write. incrementing serial number") - state.Serial++ - } - - return state, nil -} - -// WriteState writes a state out in JSON format to the given writer. -func WriteState(d *State, dst io.Writer) error { - // writing a nil state is a noop. - if d == nil { - return nil - } - - // make sure we have no uninitialized fields - d.init() - - // Make sure it is sorted - d.sort() - - // Ensure the version is set - d.Version = StateVersion - - // If the TFVersion is set, verify it. We used to just set the version - // here, but this isn't safe since it changes the MD5 sum on some remote - // state storage backends such as Atlas. We now leave it be if needed. - if d.TFVersion != "" { - if _, err := version.NewVersion(d.TFVersion); err != nil { - return fmt.Errorf( - "Error writing state, invalid version: %s\n\n"+ - "The Terraform version when writing the state must be a semantic\n"+ - "version.", - d.TFVersion) - } - } - - // Encode the data in a human-friendly way - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return fmt.Errorf("Failed to encode state: %s", err) - } - - // We append a newline to the data because MarshalIndent doesn't - data = append(data, '\n') - - // Write the data out to the dst - if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { - return fmt.Errorf("Failed to write state: %v", err) - } - - return nil -} - -// resourceNameSort implements the sort.Interface to sort name parts lexically for -// strings and numerically for integer indexes.
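The comparator just described rewards a quick demonstration: a plain lexical sort would put "10" before "9", while the part-wise numeric comparison orders indexes naturally. A minimal self-contained sketch follows (naturalLess is a hypothetical name, not the deleted type); the deleted resourceNameSort comes after it.

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// naturalLess compares dotted state keys part by part: numerically when
// both parts are integers, lexically otherwise, with numbers sorting
// before strings.
func naturalLess(a, b string) bool {
	ap, bp := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(ap) && i < len(bp); i++ {
		if ap[i] == bp[i] {
			continue
		}
		ai, aErr := strconv.Atoi(ap[i])
		bi, bErr := strconv.Atoi(bp[i])
		switch {
		case aErr == nil && bErr == nil:
			return ai < bi // both numeric: compare as integers
		case aErr == nil:
			return true // numbers sort before strings
		case bErr == nil:
			return false
		default:
			return ap[i] < bp[i] // both strings: plain lexical order
		}
	}
	return a < b
}

func main() {
	keys := []string{"aws_instance.foo.10", "aws_instance.foo.9", "aws_instance.bar"}
	sort.Slice(keys, func(i, j int) bool { return naturalLess(keys[i], keys[j]) })
	fmt.Println(keys) // [aws_instance.bar aws_instance.foo.9 aws_instance.foo.10]
}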
-type resourceNameSort []string - -func (r resourceNameSort) Len() int { return len(r) } -func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r resourceNameSort) Less(i, j int) bool { - iParts := strings.Split(r[i], ".") - jParts := strings.Split(r[j], ".") - - end := len(iParts) - if len(jParts) < end { - end = len(jParts) - } - - for idx := 0; idx < end; idx++ { - if iParts[idx] == jParts[idx] { - continue - } - - // sort on the first non-matching part - iInt, iIntErr := strconv.Atoi(iParts[idx]) - jInt, jIntErr := strconv.Atoi(jParts[idx]) - - switch { - case iIntErr == nil && jIntErr == nil: - // sort numerically if both parts are integers - return iInt < jInt - case iIntErr == nil: - // numbers sort before strings - return true - case jIntErr == nil: - return false - default: - return iParts[idx] < jParts[idx] - } - } - - return r[i] < r[j] -} - -// moduleStateSort implements sort.Interface to sort module states -type moduleStateSort []*ModuleState - -func (s moduleStateSort) Len() int { - return len(s) -} - -func (s moduleStateSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If either is nil, then the nil one is "less" than - if a == nil || b == nil { - return a == nil - } - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} - -func (s moduleStateSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -const stateValidateErrMultiModule = ` -Multiple modules with the same path: %s - -This means that there are multiple entries in the "modules" field -in your state file that point to the same module. This will cause Terraform -to behave in unexpected and error prone ways and is invalid. Please back up -and modify your state file manually to resolve this. -` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go deleted file mode 100644 index 1163730..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_add.go +++ /dev/null @@ -1,374 +0,0 @@ -package terraform - -import "fmt" - -// Add adds the item in the state at the given address. -// -// The item can be a ModuleState, ResourceState, or InstanceState. Depending -// on the item type, the address may or may not be valid. For example, a -// module cannot be moved to a resource address, however a resource can be -// moved to a module address (it retains the same name, under that resource). -// -// The item can also be a []*ModuleState, which is the case for nested -// modules. In this case, Add will expect the zero-index to be the top-most -// module to add and will only nest children from there. For semantics, this -// is equivalent to module => module. 
-// -// The full semantics of Add: -// -// ┌───────────────────┬───────────────────┬───────────────────┐ -// │ Module Address │ Resource Address │ Instance Address │ -// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ModuleState │ ✓ │ x │ x │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ResourceState │ ✓ │ ✓ │ maybe* │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ Instance State │ ✓ │ ✓ │ ✓ │ -// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘ -// -// *maybe - Resources can be added at an instance address only if the resource -// represents a single instance (primary). Example: -// "aws_instance.foo" can be moved to "aws_instance.bar.tainted" -// -func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { - // Parse the to address - - toAddr, err := ParseResourceAddress(toAddrRaw) - if err != nil { - return err - } - - // Parse the from address - fromAddr, err := ParseResourceAddress(fromAddrRaw) - if err != nil { - return err - } - - // Determine the types - from := detectValueAddLoc(raw) - to := detectAddrAddLoc(toAddr) - - // Find the function to do this - fromMap, ok := stateAddFuncs[from] - if !ok { - return fmt.Errorf("invalid source to add to state: %T", raw) - } - f, ok := fromMap[to] - if !ok { - return fmt.Errorf("invalid destination: %s (%d)", toAddr, to) - } - - // Call the migrator - if err := f(s, fromAddr, toAddr, raw); err != nil { - return err - } - - // Prune the state - s.prune() - return nil -} - -func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ModuleState or []*ModuleState. The former means - // we're moving just one module. The latter means we're moving a module - // and children. - root := raw - var rest []*ModuleState - if list, ok := raw.([]*ModuleState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("module move with no value to: %s", addr) - } - - // The first item is always the root - root = list[0] - if len(list) > 1 { - rest = list[1:] - } - } - - // Get the actual module state - src := root.(*ModuleState).deepcopy() - - // If the target module exists, it is an error - path := append([]string{"root"}, addr.Path...) - if s.ModuleByPath(path) != nil { - return fmt.Errorf("module target is not empty: %s", addr) - } - - // Create it and copy our outputs and dependencies - mod := s.AddModule(path) - mod.Outputs = src.Outputs - mod.Dependencies = src.Dependencies - - // Go through the resources and perform an add for each of those - for k, v := range src.Resources { - resourceKey, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Update the resource address for this - addrCopy := *addr - addrCopy.Type = resourceKey.Type - addrCopy.Name = resourceKey.Name - addrCopy.Index = resourceKey.Index - addrCopy.Mode = resourceKey.Mode - - // Perform an add - if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { - return err - } - } - - // Add all the children if we have them - for _, item := range rest { - // If item isn't a descendent of our root, then ignore it - if !src.IsDescendent(item) { - continue - } - - // It is! Strip the leading prefix and attach that to our address - extra := item.Path[len(src.Path):] - addrCopy := addr.Copy() - addrCopy.Path = append(addrCopy.Path, extra...)
- - // Add it - s.Add(fromAddr.String(), addrCopy.String(), item) - } - - return nil -} - -func stateAddFunc_Resource_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - // Build the more specific to addr - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ResourceState or []*ResourceState. The former means - // we're moving just one resource. The latter means we're moving a count - // of resources. - if list, ok := raw.([]*ResourceState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("resource move with no value to: %s", addr) - } - - // If there is an index, this is an error since we can't assign - // a set of resources to a single index - if addr.Index >= 0 && len(list) > 1 { - return fmt.Errorf( - "multiple resources can't be moved to a single index: "+ - "%s => %s", fromAddr, addr) - } - - // Add each with a specific index - for i, rs := range list { - addrCopy := addr.Copy() - addrCopy.Index = i - - if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil { - return err - } - } - - return nil - } - - src := raw.(*ResourceState).deepcopy() - - // Initialize the resource - resourceRaw, exists := stateAddInitAddr(s, addr) - if exists { - return fmt.Errorf("resource exists and not empty: %s", addr) - } - resource := resourceRaw.(*ResourceState) - resource.Type = src.Type - resource.Dependencies = src.Dependencies - resource.Provider = src.Provider - - // Move the primary - if src.Primary != nil { - addrCopy := *addr - addrCopy.InstanceType = TypePrimary - addrCopy.InstanceTypeSet = true - if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil { - return err - } - } - - // Move all deposed - if len(src.Deposed) > 0 { - resource.Deposed = src.Deposed - } - - return nil -} - -func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - src := raw.(*InstanceState).DeepCopy() - - // Create the instance - instanceRaw, _ := stateAddInitAddr(s, addr) - instance := instanceRaw.(*InstanceState) - - // Set it - instance.Set(src) - - return nil -} - -func stateAddFunc_Instance_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Instance_Resource( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.InstanceType = TypePrimary - addr.InstanceTypeSet = true - - return s.Add(from.String(), addr.String(), raw) -} - -// stateAddFunc is the type of function for adding an item to a state -type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error - -// stateAddFuncs has the full matrix mapping of the state adders. 
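Add's routing boils down to a two-level lookup: the outer key is what kind of thing is being added, the inner key is what kind of address it is going to, which is exactly the shape of the stateAddFuncs table declared below. Here is a minimal sketch of that dispatch-table pattern, with hypothetical kinds and handlers rather than the deleted ones:

package main

import "fmt"

type kind int

const (
	kindModule kind = iota
	kindResource
	kindInstance
)

type addFunc func(from, to string) error

// dispatch mirrors the shape of the deleted stateAddFuncs table; only
// one handler is populated for the example.
var dispatch = map[kind]map[kind]addFunc{
	kindResource: {
		kindResource: func(from, to string) error {
			fmt.Printf("move resource %s => %s\n", from, to)
			return nil
		},
	},
}

// add resolves the handler for a (source kind, destination kind) pair
// and invokes it, returning an error on either lookup miss.
func add(fromKind, toKind kind, from, to string) error {
	inner, ok := dispatch[fromKind]
	if !ok {
		return fmt.Errorf("invalid source kind: %d", fromKind)
	}
	f, ok := inner[toKind]
	if !ok {
		return fmt.Errorf("invalid destination kind: %d", toKind)
	}
	return f(from, to)
}

func main() {
	_ = add(kindResource, kindResource, "aws_instance.foo", "aws_instance.bar")
	fmt.Println(add(kindResource, kindInstance, "a", "b")) // lookup miss yields an error
}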
-var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc - -func init() { - stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{ - stateAddModule: { - stateAddModule: stateAddFunc_Module_Module, - }, - stateAddResource: { - stateAddModule: stateAddFunc_Resource_Module, - stateAddResource: stateAddFunc_Resource_Resource, - }, - stateAddInstance: { - stateAddInstance: stateAddFunc_Instance_Instance, - stateAddModule: stateAddFunc_Instance_Module, - stateAddResource: stateAddFunc_Instance_Resource, - }, - } -} - -// stateAddLoc is an enum to represent the location where state is being -// moved from/to. We use this for quick lookups in a function map. -type stateAddLoc uint - -const ( - stateAddInvalid stateAddLoc = iota - stateAddModule - stateAddResource - stateAddInstance -) - -// detectAddrAddLoc detects the state type for the given address. This -// function is specifically not unit tested since we consider the State.Add -// functionality to be comprehensive enough to cover this. -func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc { - if addr.Name == "" { - return stateAddModule - } - - if !addr.InstanceTypeSet { - return stateAddResource - } - - return stateAddInstance -} - -// detectValueAddLoc determines the stateAddLoc value from the raw value -// that is some State structure. -func detectValueAddLoc(raw interface{}) stateAddLoc { - switch raw.(type) { - case *ModuleState: - return stateAddModule - case []*ModuleState: - return stateAddModule - case *ResourceState: - return stateAddResource - case []*ResourceState: - return stateAddResource - case *InstanceState: - return stateAddInstance - default: - return stateAddInvalid - } -} - -// stateAddInitAddr takes a ResourceAddress and creates the non-existing -// resources up to that point, returning the empty (or existing) interface -// at that address. -func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { - addType := detectAddrAddLoc(addr) - - // Get the module - path := append([]string{"root"}, addr.Path...) - exists := true - mod := s.ModuleByPath(path) - if mod == nil { - mod = s.AddModule(path) - exists = false - } - if addType == stateAddModule { - return mod, exists - } - - // Add the resource - resourceKey := (&ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Index: addr.Index, - Mode: addr.Mode, - }).String() - exists = true - resource, ok := mod.Resources[resourceKey] - if !ok { - resource = &ResourceState{Type: addr.Type} - resource.init() - mod.Resources[resourceKey] = resource - exists = false - } - if addType == stateAddResource { - return resource, exists - } - - // Get the instance - exists = true - instance := &InstanceState{} - switch addr.InstanceType { - case TypePrimary, TypeTainted: - if v := resource.Primary; v != nil { - instance = resource.Primary - } else { - exists = false - } - case TypeDeposed: - idx := addr.Index - if addr.Index < 0 { - idx = 0 - } - if len(resource.Deposed) > idx { - instance = resource.Deposed[idx] - } else { - resource.Deposed = append(resource.Deposed, instance) - exists = false - } - } - - return instance, exists -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go deleted file mode 100644 index 2dcb11b..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go +++ /dev/null @@ -1,267 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" -) - -// StateFilter is responsible for filtering and searching a state. 
-// -// This is a separate struct from State rather than a method on State -// because StateFilter might create sidecar data structures to optimize -// filtering on the state. -// -// If you change the State, the filter created is invalid and either -// Reset should be called or a new one should be allocated. StateFilter -// will not watch State for changes and do this for you. If you filter after -// changing the State without calling Reset, the behavior is not defined. -type StateFilter struct { - State *State -} - -// Filter takes the addresses specified by fs and finds all the matches. -// The values of fs are resource addressing syntax that can be parsed by -// ParseResourceAddress. -func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { - // Parse all the addresses - as := make([]*ResourceAddress, len(fs)) - for i, v := range fs { - a, err := ParseResourceAddress(v) - if err != nil { - return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) - } - - as[i] = a - } - - // If we weren't given any filters, then we list all - if len(fs) == 0 { - as = append(as, &ResourceAddress{Index: -1}) - } - - // Filter each of the addresses. We keep track of this in a map to - // strip duplicates. - resultSet := make(map[string]*StateFilterResult) - for _, a := range as { - for _, r := range f.filterSingle(a) { - resultSet[r.String()] = r - } - } - - // Make the result list - results := make([]*StateFilterResult, 0, len(resultSet)) - for _, v := range resultSet { - results = append(results, v) - } - - // Sort them and return - sort.Sort(StateFilterResultSlice(results)) - return results, nil -} - -func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { - // The slice to keep track of results - var results []*StateFilterResult - - // Go through modules first. - modules := make([]*ModuleState, 0, len(f.State.Modules)) - for _, m := range f.State.Modules { - if f.relevant(a, m) { - modules = append(modules, m) - - // Only add the module to the results if we haven't specified a type. - // We also ignore the root module. - if a.Type == "" && len(m.Path) > 1 { - results = append(results, &StateFilterResult{ - Path: m.Path[1:], - Address: (&ResourceAddress{Path: m.Path[1:]}).String(), - Value: m, - }) - } - } - } - - // With the modules set, go through all the resources within - // the modules to find relevant resources. - for _, m := range modules { - for n, r := range m.Resources { - // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) - if err != nil { - // If we get an error parsing, then just skip this - // entry rather than including it in the results. - continue - } - - // Older states and test fixtures often don't contain the - // type directly on the ResourceState. We add this so StateFilter - // is a bit more robust.
- if r.Type == "" { - r.Type = key.Type - } - - if f.relevant(a, r) { - if a.Name != "" && a.Name != key.Name { - // Name doesn't match - continue - } - - if a.Index >= 0 && key.Index != a.Index { - // Index doesn't match - continue - } - - if a.Name != "" && a.Name != key.Name { - continue - } - - // Build the address for this resource - addr := &ResourceAddress{ - Path: m.Path[1:], - Name: key.Name, - Type: key.Type, - Index: key.Index, - } - - // Add the resource level result - resourceResult := &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Value: r, - } - if !a.InstanceTypeSet { - results = append(results, resourceResult) - } - - // Add the instances - if r.Primary != nil { - addr.InstanceType = TypePrimary - addr.InstanceTypeSet = false - results = append(results, &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Parent: resourceResult, - Value: r.Primary, - }) - } - - for _, instance := range r.Deposed { - if f.relevant(a, instance) { - addr.InstanceType = TypeDeposed - addr.InstanceTypeSet = true - results = append(results, &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Parent: resourceResult, - Value: instance, - }) - } - } - } - } - } - - return results -} - -// relevant checks for relevance of this address against the given value. -func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { - switch v := raw.(type) { - case *ModuleState: - path := v.Path[1:] - - if len(addr.Path) > len(path) { - // Longer path in address means there is no way we match. - return false - } - - // Check for a prefix match - for i, p := range addr.Path { - if path[i] != p { - // Any mismatches don't match. - return false - } - } - - return true - case *ResourceState: - if addr.Type == "" { - // If we have no resource type, then we're interested in all! - return true - } - - // If the type doesn't match we fail immediately - if v.Type != addr.Type { - return false - } - - return true - default: - // If we don't know about it, let's just say no - return false - } -} - -// StateFilterResult is a single result from a filter operation. Filter -// can match multiple things within a state (module, resource, instance, etc.) -// and this unifies that. -type StateFilterResult struct { - // Module path of the result - Path []string - - // Address is the address that can be used to reference this exact result. - Address string - - // Parent, if non-nil, is a parent of this result. For instances, the - // parent would be a resource. For resources, the parent would be - // a module. For modules, this is currently nil. - Parent *StateFilterResult - - // Value is the actual value. This must be type switched on. It can be - // any data structures that `State` can hold: `ModuleState`, - // `ResourceState`, `InstanceState`. - Value interface{} -} - -func (r *StateFilterResult) String() string { - return fmt.Sprintf("%T: %s", r.Value, r.Address) -} - -func (r *StateFilterResult) sortedType() int { - switch r.Value.(type) { - case *ModuleState: - return 0 - case *ResourceState: - return 1 - case *InstanceState: - return 2 - default: - return 50 - } -} - -// StateFilterResultSlice is a slice of results that implements -// sort.Interface. The sorting goal is what is most appealing to -// human output. 
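Filter's overall shape, matching every pattern, deduplicating hits through a map keyed on their string form, then sorting, is worth isolating before the result types below. This is a loose sketch under stated assumptions: the prefix match and result type here are hypothetical stand-ins, not the deleted address matching or StateFilterResult.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// result stands in for StateFilterResult: anything with a stable string
// form we can deduplicate and sort on.
type result struct{ addr string }

func (r result) String() string { return r.addr }

// filter matches every pattern, collects hits in a map to strip
// duplicates, then returns them sorted.
func filter(addrs []string, patterns ...string) []result {
	set := make(map[string]result)
	for _, p := range patterns {
		for _, a := range addrs {
			if strings.HasPrefix(a, p) { // stand-in for real address matching
				set[a] = result{addr: a}
			}
		}
	}
	out := make([]result, 0, len(set))
	for _, r := range set {
		out = append(out, r)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].addr < out[j].addr })
	return out
}

func main() {
	state := []string{"aws_instance.foo", "aws_instance.bar", "module.net.aws_vpc.main"}
	// "aws_instance.foo" is matched by both patterns; the map collapses it.
	fmt.Println(filter(state, "aws_instance", "aws_instance.foo"))
	// [aws_instance.bar aws_instance.foo]
}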
-type StateFilterResultSlice []*StateFilterResult - -func (s StateFilterResultSlice) Len() int { return len(s) } -func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s StateFilterResultSlice) Less(i, j int) bool { - a, b := s[i], s[j] - - // if these addresses contain an index, we want to sort by index rather than name - addrA, errA := ParseResourceAddress(a.Address) - addrB, errB := ParseResourceAddress(b.Address) - if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { - return addrA.Index < addrB.Index - } - - // If the addresses are different it is just lexicographic sorting - if a.Address != b.Address { - return a.Address < b.Address - } - - // Addresses are the same, which means it matters on the type - return a.sortedType() < b.sortedType() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go deleted file mode 100644 index aa13cce..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*State, error) { - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*ModuleState, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &State{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - newState.sort() - newState.init() - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &RemoteState{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root and catch - // duplicate path errors later (as part of Validate).
- path = rootModulePath - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*OutputState) - for key, output := range old.Outputs { - outputs[key] = &OutputState{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*ResourceState) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &ModuleState{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*InstanceState, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &ResourceState{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - ephemeral, err := old.Ephemeral.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &InstanceState{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Ephemeral: *ephemeral, - Meta: newMeta, - }, nil -} - -func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { - connInfo, err := copystructure.Copy(old.ConnInfo) - if err != nil { - return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) - } - return &EphemeralState{ - ConnInfo: connInfo.(map[string]string), - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go deleted file mode 100644 index e52d35f..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" -) - -// The upgrade process from V2 to V3 state does not affect the structure, -// so we do not need to redeclare all of the structs involved - we just -// take a deep copy of the old structure and assert the version number is -// as we expect. 
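The central rewrite this upgrade performs, renaming a map-like collection's count key from "attr.#" to "attr.%", can be sketched on its own. Below is a simplified, hypothetical version; the deleted function that follows additionally logs its decisions and removes empty collections.

package main

import (
	"fmt"
	"strings"
)

// renameMapCounts rewrites "<prefix>.#" to "<prefix>.%" when the
// collection has a non-numeric subkey, i.e. when it looks like a map
// rather than a list.
func renameMapCounts(attrs map[string]string, prefix string) {
	count, ok := attrs[prefix+".#"]
	if !ok {
		return
	}
	for k := range attrs {
		rest := strings.TrimPrefix(k, prefix+".")
		if rest == k || rest == "#" {
			continue // not under this prefix, or the count key itself
		}
		if !isNumeric(strings.SplitN(rest, ".", 2)[0]) {
			attrs[prefix+".%"] = count
			delete(attrs, prefix+".#")
			return
		}
	}
}

func isNumeric(s string) bool {
	for _, r := range s {
		if r < '0' || r > '9' {
			return false
		}
	}
	return len(s) > 0
}

func main() {
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}
	renameMapCounts(attrs, "tags")
	fmt.Println(attrs) // tags.% now carries the count; tags.# is gone
}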
-func upgradeStateV2ToV3(old *State) (*State, error) { - new := old.DeepCopy() - - // Ensure the copied version is v2 before attempting to upgrade - if new.Version != 2 { - return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + - "a state which is not version 2.") - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - if resource.Deposed != nil { - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *InstanceState) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. 
- if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i++ - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go deleted file mode 100644 index 68cffb4..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -// stateV1 keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -// -// stateV1 is only used for the purposes of backwards compatibility -// and is no longer used in Terraform. -// -// For the upgrade process, see state_upgrade_v1_to_v2.go -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it.
- Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral ephemeralStateV1 `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code.
- Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/test_failure b/vendor/github.com/hashicorp/terraform/terraform/test_failure deleted file mode 100644 index 5d3ad1a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/test_failure +++ /dev/null @@ -1,9 +0,0 @@ ---- FAIL: TestContext2Plan_moduleProviderInherit (0.01s) - context_plan_test.go:552: bad: []string{"child"} -map[string]dag.Vertex{} -"module.middle.null" -map[string]dag.Vertex{} -"module.middle.module.inner.null" -map[string]dag.Vertex{} -"aws" -FAIL diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go deleted file mode 100644 index 3f0418d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "os" - "testing" -) - -// TestStateFile writes the given state to the path. -func TestStateFile(t *testing.T, path string, state *State) { - f, err := os.Create(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - if err := WriteState(state, f); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go deleted file mode 100644 index f4a431a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ /dev/null @@ -1,52 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error -} - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within the graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. -type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) } - -// GraphTransformIf is a helper function that conditionally returns the -// given GraphTransformer. This is useful for calling inline a sequence -// of transforms without having to split it up into multiple append() calls. -func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - for _, t := range t.Transforms { - if err := t.Transform(g); err != nil { - return err - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers.
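GraphTransformMulti, whose definition continues below, is the classic composite pattern: run each transformer in order and stop at the first error. A minimal self-contained sketch follows, with graph and addNode as hypothetical stand-ins for the real Graph and dag types:

package main

import "fmt"

// graph is a stand-in for the deleted *Graph type.
type graph struct{ nodes []string }

type transformer interface {
	Transform(*graph) error
}

// addNode is a trivial transformer that appends one node.
type addNode string

func (a addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, string(a))
	return nil
}

// multi runs each transformer in order, stopping on the first error,
// mirroring the deleted graphTransformerMulti.
type multi []transformer

func (m multi) Transform(g *graph) error {
	for _, t := range m {
		if err := t.Transform(g); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	g := &graph{}
	all := multi{addNode("aws_instance.foo"), addNode("aws_vpc.main")}
	if err := all.Transform(g); err != nil {
		panic(err)
	}
	fmt.Println(g.nodes) // [aws_instance.foo aws_vpc.main]
}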
-func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go deleted file mode 100644 index 10506ea..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,80 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // Must be implemented to determine the path for the configuration - GraphNodeSubPath - - // ProviderName with no module prefix. Example: "aws". - ProviderName() string - - // Sets the configuration - AttachProvider(*config.ProviderConfig) -} - -// AttachProviderConfigTransformer goes through the graph and attaches -// provider configuration structures to nodes that implement the interfaces -// above. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachProviderConfigTransformer struct { - Module *module.Tree // Module is the root module for the config -} - -func (t *AttachProviderConfigTransformer) Transform(g *Graph) error { - if err := t.attachProviders(g); err != nil { - return err - } - - return nil -} - -func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error { - // Go through and find GraphNodeAttachProvider - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - path := normalizeModulePath(apn.Path()) - path = path[1:] - name := apn.ProviderName() - log.Printf("[TRACE] Attach provider request: %#v %s", path, name) - - // Get the configuration. - tree := t.Module.Child(path) - if tree == nil { - continue - } - - // Go through the provider configs to find the matching config - for _, p := range tree.Config().ProviderConfigs { - // Build the name, which is "name.alias" if an alias exists - current := p.Name - if p.Alias != "" { - current += "." + p.Alias - } - - // If the configs match then attach! - if current == name { - log.Printf("[TRACE] Attaching provider config: %#v", p) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go deleted file mode 100644 index f2ee37e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,78 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. 
-type GraphNodeAttachResourceConfig interface { - // ResourceAddr is the address to the resource - ResourceAddr() *ResourceAddress - - // Sets the configuration - AttachResourceConfig(*config.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement the interfaces -// above. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachResourceConfigTransformer struct { - Module *module.Tree // Module is the root module for the config -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...") - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - log.Printf( - "[TRACE] AttachResourceConfigTransformer: Attach resource "+ - "config request: %s", addr) - - // Get the configuration. - path := normalizeModulePath(addr.Path) - path = path[1:] - tree := t.Module.Child(path) - if tree == nil { - continue - } - - // Go through the resource configs to find the matching config - for _, r := range tree.Config().Resources { - // Get a resource address so we can compare - a, err := parseResourceAddressConfig(r) - if err != nil { - panic(fmt.Sprintf( - "Error parsing config address, this is a bug: %#v", r)) - } - a.Path = addr.Path - - // If this is not the same resource, then continue - if !a.Equals(addr) { - continue - } - - log.Printf("[TRACE] Attaching resource config: %#v", r) - arn.AttachResourceConfig(r) - break - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go deleted file mode 100644 index 564ff08..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -type GraphNodeAttachResourceState interface { - // The address to the resource for the state - ResourceAddr() *ResourceAddress - - // Sets the state - AttachResourceState(*ResourceState) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. 
-type AttachStateTransformer struct { - State *State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any state: state is nil") - return nil - } - - filter := &StateFilter{State: t.State} - for _, v := range g.Vertices() { - // Only care about nodes requesting we're adding state - an, ok := v.(GraphNodeAttachResourceState) - if !ok { - continue - } - addr := an.ResourceAddr() - - // Get the module state - results, err := filter.Filter(addr.String()) - if err != nil { - return err - } - - // Attach the first resource state we get - found := false - for _, result := range results { - if rs, ok := result.Value.(*ResourceState); ok { - log.Printf( - "[DEBUG] Attaching resource state to %q: %#v", - dag.VertexName(v), rs) - an.AttachResourceState(rs) - found = true - break - } - } - - if !found { - log.Printf( - "[DEBUG] Resource state not found for %q: %s", - dag.VertexName(v), addr) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go deleted file mode 100644 index 61bce85..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "errors" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// ConfigTransformer is a GraphTransformer that adds all the resources -// from the configuration to the graph. -// -// The module used to configure this transformer must be the root module. -// -// Only resources are added to the graph. Variables, outputs, and -// providers must be added via other transforms. -// -// Unlike ConfigTransformerOld, this transformer creates a graph with -// all resources including module resources, rather than creating module -// nodes that are then "flattened". -type ConfigTransformer struct { - Concrete ConcreteResourceNodeFunc - - // Module is the module to add resources from. - Module *module.Tree - - // Unique will only add resources that aren't already present in the graph. - Unique bool - - // Mode will only add resources that match the given mode - ModeFilter bool - Mode config.ResourceMode - - l sync.Mutex - uniqueMap map[string]struct{} -} - -func (t *ConfigTransformer) Transform(g *Graph) error { - // Lock since we use some internal state - t.l.Lock() - defer t.l.Unlock() - - // If no module is given, we don't do anything - if t.Module == nil { - return nil - } - - // If the module isn't loaded, that is simply an error - if !t.Module.Loaded() { - return errors.New("module must be loaded for ConfigTransformer") - } - - // Reset the uniqueness map. If we're tracking uniques, then populate - // it with addresses. - t.uniqueMap = make(map[string]struct{}) - defer func() { t.uniqueMap = nil }() - if t.Unique { - for _, v := range g.Vertices() { - if rn, ok := v.(GraphNodeResource); ok { - t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} - } - } - } - - // Start the transformation process - return t.transform(g, t.Module) -} - -func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { - // If no config, do nothing - if m == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, m); err != nil { - return err - } - - // Transform all the children. 
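The Unique handling above pre-populates a set with the addresses already in the graph, then skips those when adding config resources. A standalone sketch of that dedup, with plain strings standing in for real resource nodes:

package main

import "fmt"

// resourceNode stands in for a graph vertex that reports its address.
type resourceNode struct{ addr string }

func (r *resourceNode) ResourceAddr() string { return r.addr }

// addUnique mirrors the Unique path: seed a set from the graph, then
// only let unseen addresses through.
func addUnique(existing []*resourceNode, incoming []string) []string {
	seen := make(map[string]struct{}, len(existing))
	for _, v := range existing {
		seen[v.ResourceAddr()] = struct{}{}
	}
	var added []string
	for _, addr := range incoming {
		if _, ok := seen[addr]; ok {
			continue
		}
		seen[addr] = struct{}{}
		added = append(added, addr)
	}
	return added
}

func main() {
	inGraph := []*resourceNode{{addr: "aws_instance.web"}}
	fmt.Println(addUnique(inGraph, []string{"aws_instance.web", "aws_s3_bucket.logs"}))
	// [aws_s3_bucket.logs]
}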
- for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) - - // Get the configuration for this module - conf := m.Config() - - // Build the path we're at - path := m.Path() - - // Write all the resources out - for _, r := range conf.Resources { - // Build the resource address - addr, err := parseResourceAddressConfig(r) - if err != nil { - panic(fmt.Sprintf( - "Error parsing config address, this is a bug: %#v", r)) - } - addr.Path = path - - // If this is already in our uniqueness map, don't add it again - if _, ok := t.uniqueMap[addr.String()]; ok { - continue - } - - // Remove non-matching modes - if t.ModeFilter && addr.Mode != t.Mode { - continue - } - - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - // Add it to the graph - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go deleted file mode 100644 index 92f9888..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go +++ /dev/null @@ -1,80 +0,0 @@ -package terraform - -import ( - "errors" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// FlatConfigTransformer is a GraphTransformer that adds the configuration -// to the graph. The module used to configure this transformer must be -// the root module. -// -// This transform adds the nodes but doesn't connect any of the references. -// The ReferenceTransformer should be used for that. -// -// NOTE: In relation to ConfigTransformer: this is a newer generation config -// transformer. It puts the _entire_ config into the graph (there is no -// "flattening" step as before). -type FlatConfigTransformer struct { - Concrete ConcreteResourceNodeFunc // What to turn resources into - - Module *module.Tree -} - -func (t *FlatConfigTransformer) Transform(g *Graph) error { - // If no module, we do nothing - if t.Module == nil { - return nil - } - - // If the module is not loaded, that is an error - if !t.Module.Loaded() { - return errors.New("module must be loaded") - } - - return t.transform(g, t.Module) -} - -func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { - // If no module, no problem - if m == nil { - return nil - } - - // Transform all the children. - for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { - return err - } - } - - // Get the configuration for this module - config := m.Config() - - // Write all the resources out - for _, r := range config.Resources { - // Grab the address for this resource - addr, err := parseResourceAddressConfig(r) - if err != nil { - return err - } - addr.Path = m.Path() - - // Build the abstract resource. We have the config already so - // we'll just pre-populate that. 
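The abstract/concrete split used here (build a NodeAbstractResource, then let an optional Concrete callback wrap it) is the extension point nearly every transformer in this file relies on. A sketch under stand-in types:

package main

import "fmt"

// Stand-ins for dag.Vertex and the node types.
type Vertex interface{}

type NodeAbstractResource struct{ Addr string }

type NodeApplyableResource struct{ *NodeAbstractResource }

// ConcreteResourceNodeFunc lets each graph builder decide what concrete
// node wraps the abstract one.
type ConcreteResourceNodeFunc func(*NodeAbstractResource) Vertex

func buildNode(addr string, concrete ConcreteResourceNodeFunc) Vertex {
	abstract := &NodeAbstractResource{Addr: addr}
	var node Vertex = abstract
	if concrete != nil {
		node = concrete(abstract)
	}
	return node
}

func main() {
	applyable := func(a *NodeAbstractResource) Vertex {
		return &NodeApplyableResource{NodeAbstractResource: a}
	}
	fmt.Printf("%T\n", buildNode("aws_instance.web", applyable)) // *main.NodeApplyableResource
	fmt.Printf("%T\n", buildNode("aws_instance.web", nil))       // *main.NodeAbstractResource
}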
- abstract := &NodeAbstractResource{ - Addr: addr, - Config: r, - } - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go deleted file mode 100644 index ec41258..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go +++ /dev/null @@ -1,23 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" -) - -// varNameForVar returns the VarName value for an interpolated variable. -// This value is compared to the VarName() value for the nodes within the -// graph to build the graph edges. -func varNameForVar(raw config.InterpolatedVariable) string { - switch v := raw.(type) { - case *config.ModuleVariable: - return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field) - case *config.ResourceVariable: - return v.ResourceId() - case *config.UserVariable: - return fmt.Sprintf("var.%s", v.Name) - default: - return "" - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go deleted file mode 100644 index 83415f3..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// CountBoundaryTransformer adds a node that depends on everything else -// so that it runs last in order to clean up the state for nodes that -// are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct{} - -func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{} - g.Add(node) - - // Depends on everything - for _, v := range g.Vertices() { - // Don't connect to ourselves - if v == node { - continue - } - - // Connect! - g.Connect(dag.BasicEdge(node, v)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go deleted file mode 100644 index 2148cef..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go +++ /dev/null @@ -1,168 +0,0 @@ -package terraform - -import "fmt" - -// DeposedTransformer is a GraphTransformer that adds deposed resources -// to the graph. -type DeposedTransformer struct { - // State is the global state. We'll automatically find the correct - // ModuleState based on the Graph.Path that is being transformed. - State *State - - // View, if non-empty, is the ModuleState.View used around the state - // to find deposed resources. - View string -} - -func (t *DeposedTransformer) Transform(g *Graph) error { - state := t.State.ModuleByPath(g.Path) - if state == nil { - // If there is no state for our module there can't be any deposed - // resources, since they live in the state. 
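For reference, the varNameForVar scheme above produces the names the reference map matches on. A trimmed sketch covering two of the three cases, with stand-in variable types:

package main

import "fmt"

// Trimmed stand-ins for two of the config variable types.
type ModuleVariable struct{ Name, Field string }
type UserVariable struct{ Name string }

func varName(v interface{}) string {
	switch t := v.(type) {
	case *ModuleVariable:
		return fmt.Sprintf("module.%s.output.%s", t.Name, t.Field)
	case *UserVariable:
		return fmt.Sprintf("var.%s", t.Name)
	default:
		return ""
	}
}

func main() {
	fmt.Println(varName(&ModuleVariable{Name: "net", Field: "vpc_id"})) // module.net.output.vpc_id
	fmt.Println(varName(&UserVariable{Name: "region"}))                 // var.region
}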
- return nil - } - - // If we have a view, apply it now - if t.View != "" { - state = state.View(t.View) - } - - // Go through all the resources in our state to look for deposed resources - for k, rs := range state.Resources { - // If we have no deposed resources, then move on - if len(rs.Deposed) == 0 { - continue - } - deposed := rs.Deposed - - for i, _ := range deposed { - g.Add(&graphNodeDeposedResource{ - Index: i, - ResourceName: k, - ResourceType: rs.Type, - Provider: rs.Provider, - }) - } - } - - return nil -} - -// graphNodeDeposedResource is the graph vertex representing a deposed resource. -type graphNodeDeposedResource struct { - Index int - ResourceName string - ResourceType string - Provider string -} - -func (n *graphNodeDeposedResource) Name() string { - return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) -} - -func (n *graphNodeDeposedResource) ProvidedBy() []string { - return []string{resourceProvider(n.ResourceName, n.Provider)} -} - -// GraphNodeEvalable impl. -func (n *graphNodeDeposedResource) EvalTree() EvalNode { - var provider ResourceProvider - var state *InstanceState - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // Build instance info - info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType} - seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) - - // Refresh the resource - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadStateDeposed{ - Name: n.ResourceName, - Output: &state, - Index: n.Index, - }, - &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Name: n.ResourceName, - ResourceType: n.ResourceType, - Provider: n.Provider, - State: &state, - Index: n.Index, - }, - }, - }, - }) - - // Apply - var diff *InstanceDiff - var err error - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadStateDeposed{ - Name: n.ResourceName, - Output: &state, - Index: n.Index, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diff, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diff, - Provider: &provider, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. 
- &EvalWriteStateDeposed{ - Name: n.ResourceName, - ResourceType: n.ResourceType, - Provider: n.Provider, - State: &state, - Index: n.Index, - }, - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - }) - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go deleted file mode 100644 index edfb460..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,257 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers. -type GraphNodeDestroyerCBD interface { - GraphNodeDestroyer - - // CreateBeforeDestroy returns true if this node represents a node - // that is doing a CBD. - CreateBeforeDestroy() bool - - // ModifyCreateBeforeDestroy is called when the CBD state of a node - // is changed dynamically. This can return an error if this isn't - // allowed. - ModifyCreateBeforeDestroy(bool) error -} - -// CBDEdgeTransformer modifies the edges of CBD nodes that went through -// the DestroyEdgeTransformer to have the right dependencies. There are -// two real tasks here: -// -// 1. With CBD, the destroy edge is inverted: the destroy depends on -// the creation. -// -// 2. A_d must depend on resources that depend on A. This is to enable -// the destroy to only happen once nodes that depend on A successfully -// update to A. Example: adding a web server updates the load balancer -// before deleting the old web server. -// -type CBDEdgeTransformer struct { - // Module and State are only needed to look up dependencies in - // any way possible. Either can be nil if not availabile. - Module *module.Tree - State *State -} - -func (t *CBDEdgeTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...") - - // Go through and reverse any destroy edges - destroyMap := make(map[string][]dag.Vertex) - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - // If there are no CBD ancestors (dependent nodes), then we - // do nothing here. - if !t.hasCBDAncestor(g, v) { - continue - } - - // If this isn't naturally a CBD node, this means that an ancestor is - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } - } - - // Find the destroy edge. There should only be one. - for _, e := range g.EdgesTo(v) { - // Not a destroy edge, ignore it - de, ok := e.(*DestroyEdge) - if !ok { - continue - } - - log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s", - dag.VertexName(de.Source()), dag.VertexName(de.Target())) - - // Found it! Invert. - g.RemoveEdge(de) - g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()}) - } - - // If the address has an index, we strip that. 
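Step 1 above, shown standalone: the destroy edge is removed and re-added with source and target swapped, so with CBD the destroy waits on the create. Edge shape and the "_d" naming here are illustrative stand-ins, not the dag package:

package main

import (
	"fmt"
	"strings"
)

type edge struct{ src, dst string }

// invertDestroyEdges applies the CBD rule: an edge from A_d to A
// becomes an edge from A to A_d, so creation happens before destruction.
func invertDestroyEdges(edges []edge) []edge {
	out := make([]edge, 0, len(edges))
	for _, e := range edges {
		if strings.HasSuffix(e.src, "_d") {
			out = append(out, edge{src: e.dst, dst: e.src})
			continue
		}
		out = append(out, e)
	}
	return out
}

func main() {
	fmt.Println(invertDestroyEdges([]edge{{src: "A_d", dst: "A"}}))
	// [{A A_d}]
}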
Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCountBoth) so it'll be caught. - addr := dn.DestroyAddr() - if addr.Index >= 0 { - addr = addr.Copy() // Copy so that we don't modify any pointers - addr.Index = -1 - } - - // Add this to the list of nodes that we need to fix up - // the edges for (step 2 above in the docs). - key := addr.String() - destroyMap[key] = append(destroyMap[key], v) - } - - // If we have no CBD nodes, then our work here is done - if len(destroyMap) == 0 { - return nil - } - - // We have CBD nodes. We now have to move on to the much more difficult - // task of connecting dependencies of the creation side of the destroy - // to the destruction node. The easiest way to explain this is an example: - // - // Given a pre-destroy dependence of: A => B - // And A has CBD set. - // - // The resulting graph should be: A => B => A_d - // - // They key here is that B happens before A is destroyed. This is to - // facilitate the primary purpose for CBD: making sure that downstreams - // are properly updated to avoid downtime before the resource is destroyed. - // - // We can't trust that the resource being destroyed or anything that - // depends on it is actually in our current graph so we make a new - // graph in order to determine those dependencies and add them in. - log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...") - depMap, err := t.depMap(destroyMap) - if err != nil { - return err - } - - // We now have the mapping of resource addresses to the destroy - // nodes they need to depend on. We now go through our own vertices to - // find any matching these addresses and make the connection. - for _, v := range g.Vertices() { - // We're looking for creators - rn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - // Get the address - addr := rn.CreateAddr() - - // If the address has an index, we strip that. Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCount) so it'll be caught. - if addr.Index >= 0 { - addr = addr.Copy() // Copy so that we don't modify any pointers - addr.Index = -1 - } - - // If there is nothing this resource should depend on, ignore it - key := addr.String() - dns, ok := depMap[key] - if !ok { - continue - } - - // We have nodes! Make the connection - for _, dn := range dns { - log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s", - dag.VertexName(dn), dag.VertexName(v)) - g.Connect(dag.BasicEdge(dn, v)) - } - } - - return nil -} - -func (t *CBDEdgeTransformer) depMap( - destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { - // Build the graph of our config, this ensures that all resources - // are present in the graph. - g, err := (&BasicGraphBuilder{ - Steps: []GraphTransformer{ - &FlatConfigTransformer{Module: t.Module}, - &AttachResourceConfigTransformer{Module: t.Module}, - &AttachStateTransformer{State: t.State}, - &ReferenceTransformer{}, - }, - Name: "CBDEdgeTransformer", - }).Build(nil) - if err != nil { - return nil, err - } - - // Using this graph, build the list of destroy nodes that each resource - // address should depend on. 
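The index-stripping mentioned above matters because the temporary dependency graph never expands count, so indexed and unindexed addresses must land in the same depMap bucket. A sketch with a stand-in ResourceAddress:

package main

import "fmt"

// ResourceAddress is a stand-in with just the fields the trick needs.
type ResourceAddress struct {
	Type, Name string
	Index      int // -1 means no count index
}

func (a ResourceAddress) key() string {
	if a.Index >= 0 {
		return fmt.Sprintf("%s.%s[%d]", a.Type, a.Name, a.Index)
	}
	return fmt.Sprintf("%s.%s", a.Type, a.Name)
}

// stripIndex mirrors the addr.Copy(); addr.Index = -1 dance: it works on
// a copy so the caller's address is left untouched.
func stripIndex(a ResourceAddress) ResourceAddress {
	a.Index = -1
	return a
}

func main() {
	a := ResourceAddress{Type: "aws_instance", Name: "web", Index: 2}
	b := ResourceAddress{Type: "aws_instance", Name: "web", Index: -1}
	fmt.Println(stripIndex(a).key() == b.key()) // true: same depMap bucket
}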
For example, when we find B, we map the - // address of B to A_d in the "depMap" variable below. - depMap := make(map[string][]dag.Vertex) - for _, v := range g.Vertices() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Get the address - addr := rn.ResourceAddr() - key := addr.String() - - // Get the destroy nodes that are destroying this resource. - // If there aren't any, then we don't need to worry about - // any connections. - dns, ok := destroyMap[key] - if !ok { - continue - } - - // Get the nodes that depend on this on. In the example above: - // finding B in A => B. - for _, v := range g.UpEdges(v).List() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Keep track of the destroy nodes that this address - // needs to depend on. - key := rn.ResourceAddr().String() - depMap[key] = append(depMap[key], dns...) - } - } - - return depMap, nil -} - -// hasCBDAncestor returns true if any ancestor (node that depends on this) -// has CBD set. -func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool { - s, _ := g.Ancestors(v) - if s == nil { - return true - } - - for _, v := range s.List() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some ancestor is CreateBeforeDestroy, so we need to follow suit - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go deleted file mode 100644 index 22be1ab..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ /dev/null @@ -1,269 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeDestroyer must be implemented by nodes that destroy resources. -type GraphNodeDestroyer interface { - dag.Vertex - - // ResourceAddr is the address of the resource that is being - // destroyed by this node. If this returns nil, then this node - // is not destroying anything. - DestroyAddr() *ResourceAddress -} - -// GraphNodeCreator must be implemented by nodes that create OR update resources. -type GraphNodeCreator interface { - // ResourceAddr is the address of the resource being created or updated - CreateAddr() *ResourceAddress -} - -// DestroyEdgeTransformer is a GraphTransformer that creates the proper -// references for destroy resources. Destroy resources are more complex -// in that they must be depend on the destruction of resources that -// in turn depend on the CREATION of the node being destroy. -// -// That is complicated. Visually: -// -// B_d -> A_d -> A -> B -// -// Notice that A destroy depends on B destroy, while B create depends on -// A create. They're inverted. This must be done for example because often -// dependent resources will block parent resources from deleting. Concrete -// example: VPC with subnets, the VPC can't be deleted while there are -// still subnets. -type DestroyEdgeTransformer struct { - // These are needed to properly build the graph of dependencies - // to determine what a destroy node depends on. Any of these can be nil. 
- Module *module.Tree - State *State -} - -func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...") - - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. In general there will only be one destroyer - // but to make it more robust we support multiple. - destroyers := make(map[string][]GraphNodeDestroyer) - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyer) - if !ok { - continue - } - - addr := dn.DestroyAddr() - if addr == nil { - continue - } - - key := addr.String() - log.Printf( - "[TRACE] DestroyEdgeTransformer: %s destroying %q", - dag.VertexName(dn), key) - destroyers[key] = append(destroyers[key], dn) - } - - // If we aren't destroying anything, there will be no edges to make - // so just exit early and avoid future work. - if len(destroyers) == 0 { - return nil - } - - // Go through and connect creators to destroyers. Going along with - // our example, this makes: A_d => A - for _, v := range g.Vertices() { - cn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - addr := cn.CreateAddr() - if addr == nil { - continue - } - - key := addr.String() - ds := destroyers[key] - if len(ds) == 0 { - continue - } - - for _, d := range ds { - // For illustrating our example - a_d := d.(dag.Vertex) - a := v - - log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s", - dag.VertexName(a), dag.VertexName(a_d)) - - g.Connect(&DestroyEdge{S: a, T: a_d}) - } - } - - // This is strange but is the easiest way to get the dependencies - // of a node that is being destroyed. We use another graph to make sure - // the resource is in the graph and ask for references. We have to do this - // because the node that is being destroyed may NOT be in the graph. - // - // Example: resource A is force new, then destroy A AND create A are - // in the graph. BUT if resource A is just pure destroy, then only - // destroy A is in the graph, and create A is not. - providerFn := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{NodeAbstractProvider: a} - } - steps := []GraphTransformer{ - // Add outputs and metadata - &OutputTransformer{Module: t.Module}, - &AttachResourceConfigTransformer{Module: t.Module}, - &AttachStateTransformer{State: t.State}, - - // Add providers since they can affect destroy order as well - &MissingProviderTransformer{AllowAny: true, Concrete: providerFn}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: t.Module}, - - // Add all the variables. We can depend on resources through - // variables due to module parameters, and we need to properly - // determine that. - &RootVariableTransformer{Module: t.Module}, - &ModuleVariableTransformer{Module: t.Module}, - - &ReferenceTransformer{}, - } - - // Go through all the nodes being destroyed and create a graph. - // The resulting graph is only of things being CREATED. For example, - // following our example, the resulting graph would be: - // - // A, B (with no edges) - // - var tempG Graph - var tempDestroyed []dag.Vertex - for d, _ := range destroyers { - // d is what is being destroyed. We parse the resource address - // which it came from it is a panic if this fails. 
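The first half of this Transform builds a destroyers index and then pairs each creator with the destroyers of the same address. The shape of those two passes, with stand-in nodes:

package main

import "fmt"

// node stands in for a vertex that is either a creator or a destroyer
// of the given resource address.
type node struct {
	addr    string
	destroy bool
}

func main() {
	vertices := []*node{
		{addr: "aws_vpc.main", destroy: true},
		{addr: "aws_vpc.main"},
		{addr: "aws_subnet.a"},
	}

	// Pass 1: index destroyers by address; several per address are allowed.
	destroyers := map[string][]*node{}
	for _, v := range vertices {
		if v.destroy {
			destroyers[v.addr] = append(destroyers[v.addr], v)
		}
	}

	// Pass 2: connect each creator to the destroyers of the same address
	// (the A_d => A pairing described in the comment above).
	for _, v := range vertices {
		if v.destroy {
			continue
		}
		for _, d := range destroyers[v.addr] {
			fmt.Printf("DestroyEdge: create %s => destroy %s\n", v.addr, d.addr)
		}
	}
}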
- addr, err := ParseResourceAddress(d) - if err != nil { - panic(err) - } - - // This part is a little bit weird but is the best way to - // find the dependencies we need to: build a graph and use the - // attach config and state transformers then ask for references. - abstract := &NodeAbstractResource{Addr: addr} - tempG.Add(abstract) - tempDestroyed = append(tempDestroyed, abstract) - - // We also add the destroy version here since the destroy can - // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResource{NodeAbstractResource: abstract} - tempG.Add(destroy) - tempDestroyed = append(tempDestroyed, destroy) - } - - // Run the graph transforms so we have the information we need to - // build references. - for _, s := range steps { - if err := s.Transform(&tempG); err != nil { - return err - } - } - - log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String()) - - // Go through all the nodes in the graph and determine what they - // depend on. - for _, v := range tempDestroyed { - // Find all ancestors of this to determine the edges we'll depend on - vs, err := tempG.Ancestors(v) - if err != nil { - return err - } - - refs := make([]dag.Vertex, 0, vs.Len()) - for _, raw := range vs.List() { - refs = append(refs, raw.(dag.Vertex)) - } - - refNames := make([]string, len(refs)) - for i, ref := range refs { - refNames[i] = dag.VertexName(ref) - } - log.Printf( - "[TRACE] DestroyEdgeTransformer: creation node %q references %s", - dag.VertexName(v), refNames) - - // If we have no references, then we won't need to do anything - if len(refs) == 0 { - continue - } - - // Get the destroy node for this. In the example of our struct, - // we are currently at B and we're looking for B_d. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - addr := rn.ResourceAddr() - if addr == nil { - continue - } - - dns := destroyers[addr.String()] - - // We have dependencies, check if any are being destroyed - // to build the list of things that we must depend on! - // - // In the example of the struct, if we have: - // - // B_d => A_d => A => B - // - // Then at this point in the algorithm we started with B_d, - // we built B (to get dependencies), and we found A. We're now looking - // to see if A_d exists. - var depDestroyers []dag.Vertex - for _, v := range refs { - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - addr := rn.ResourceAddr() - if addr == nil { - continue - } - - key := addr.String() - if ds, ok := destroyers[key]; ok { - for _, d := range ds { - depDestroyers = append(depDestroyers, d.(dag.Vertex)) - log.Printf( - "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", - key, dag.VertexName(d)) - } - } - } - - // Go through and make the connections. Use the variable - // names "a_d" and "b_d" to reference our example. - for _, a_d := range dns { - for _, b_d := range depDestroyers { - if b_d != a_d { - g.Connect(dag.BasicEdge(b_d, a_d)) - } - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go deleted file mode 100644 index ad46d3c..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go +++ /dev/null @@ -1,86 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// DiffTransformer is a GraphTransformer that adds the elements of -// the diff to the graph. 
-// -// This transform is used for example by the ApplyGraphBuilder to ensure -// that only resources that are being modified are represented in the graph. -// -// Module and State is still required for the DiffTransformer for annotations -// since the Diff doesn't contain all the information required to build the -// complete graph (such as create-before-destroy information). The graph -// is built based on the diff first, though, ensuring that only resources -// that are being modified are present in the graph. -type DiffTransformer struct { - Concrete ConcreteResourceNodeFunc - - Diff *Diff - Module *module.Tree - State *State -} - -func (t *DiffTransformer) Transform(g *Graph) error { - // If the diff is nil or empty (nil is empty) then do nothing - if t.Diff.Empty() { - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer: starting") - var nodes []dag.Vertex - for _, m := range t.Diff.Modules { - log.Printf("[TRACE] DiffTransformer: Module: %s", m) - // TODO: If this is a destroy diff then add a module destroy node - - // Go through all the resources in this module. - for name, inst := range m.Resources { - log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) - - // We have changes! This is a create or update operation. - // First grab the address so we have a unique way to - // reference this resource. - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } - - // Very important: add the module path for this resource to - // the address. Remove "root" from it. - addr.Path = m.Path[1:] - - // If we're destroying, add the destroy node - if inst.Destroy || inst.GetDestroyDeposed() { - abstract := &NodeAbstractResource{Addr: addr} - g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) - } - - // If we have changes, then add the applyable version - if len(inst.Attributes) > 0 { - // Add the resource to the graph - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - nodes = append(nodes, node) - } - } - } - - // Add all the nodes to the graph - for _, n := range nodes { - g.Add(n) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go deleted file mode 100644 index 982c098..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeExapndable is an interface that nodes can implement to -// signal that they can be expanded. Expanded nodes turn into -// GraphNodeSubgraph nodes within the graph. -type GraphNodeExpandable interface { - Expand(GraphBuilder) (GraphNodeSubgraph, error) -} - -// GraphNodeDynamicExpandable is an interface that nodes can implement -// to signal that they can be expanded at eval-time (hence dynamic). -// These nodes are given the eval context and are expected to return -// a new subgraph. -type GraphNodeDynamicExpandable interface { - DynamicExpand(EvalContext) (*Graph, error) -} - -// GraphNodeSubgraph is an interface a node can implement if it has -// a larger subgraph that should be walked. 
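The DiffTransformer selection rule above reduces to: a destroy flag yields a destroy node, non-empty attribute changes yield an apply node, and an untouched resource yields nothing. A sketch with a stand-in instance diff:

package main

import "fmt"

type instanceDiff struct {
	Destroy    bool
	Attributes map[string]string
}

// nodesFor returns the graph nodes a single diff entry would produce,
// as labels rather than real vertices.
func nodesFor(name string, d instanceDiff) []string {
	var nodes []string
	if d.Destroy {
		nodes = append(nodes, "destroy:"+name)
	}
	if len(d.Attributes) > 0 {
		nodes = append(nodes, "apply:"+name)
	}
	return nodes
}

func main() {
	fmt.Println(nodesFor("aws_instance.web", instanceDiff{Attributes: map[string]string{"ami": "ami-123"}}))
	// [apply:aws_instance.web]
	fmt.Println(nodesFor("aws_instance.old", instanceDiff{Destroy: true}))
	// [destroy:aws_instance.old]
}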
-type GraphNodeSubgraph interface { - Subgraph() dag.Grapher -} - -// ExpandTransform is a transformer that does a subgraph expansion -// at graph transform time (vs. at eval time). The benefit of earlier -// subgraph expansion is that errors with the graph build can be detected -// at an earlier stage. -type ExpandTransform struct { - Builder GraphBuilder -} - -func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { - ev, ok := v.(GraphNodeExpandable) - if !ok { - // This isn't an expandable vertex, so just ignore it. - return v, nil - } - - // Expand the subgraph! - log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) - return ev.Expand(t.Builder) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go deleted file mode 100644 index 3673771..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" -) - -// ImportProviderValidateTransformer is a GraphTransformer that goes through -// the providers in the graph and validates that they only depend on variables. -type ImportProviderValidateTransformer struct{} - -func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - pv, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // We only care about providers that reference things - rn, ok := pv.(GraphNodeReferencer) - if !ok { - continue - } - - for _, ref := range rn.References() { - if !strings.HasPrefix(ref, "var.") { - return fmt.Errorf( - "Provider %q depends on non-var %q. Providers for import can currently\n"+ - "only depend on variables or must be hardcoded. You can stop import\n"+ - "from loading configurations by specifying `-config=\"\"`.", - pv.ProviderName(), ref) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go deleted file mode 100644 index 081df2f..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ /dev/null @@ -1,241 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// ImportStateTransformer is a GraphTransformer that adds nodes to the -// graph to represent the imports we want to do for resources. 
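The import-time provider check above is a plain prefix test over each provider's references. Standalone, with references as the same bare strings the vendored code uses:

package main

import (
	"fmt"
	"strings"
)

// validateProviderRefs fails if a provider references anything other
// than a variable, mirroring the "var." prefix rule above.
func validateProviderRefs(provider string, refs []string) error {
	for _, ref := range refs {
		if !strings.HasPrefix(ref, "var.") {
			return fmt.Errorf("provider %q depends on non-var %q", provider, ref)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateProviderRefs("aws", []string{"var.region"}))                  // <nil>
	fmt.Println(validateProviderRefs("aws", []string{"aws_instance.web.private_ip"})) // error
}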
-type ImportStateTransformer struct { - Targets []*ImportTarget -} - -func (t *ImportStateTransformer) Transform(g *Graph) error { - nodes := make([]*graphNodeImportState, 0, len(t.Targets)) - for _, target := range t.Targets { - addr, err := ParseResourceAddress(target.Addr) - if err != nil { - return fmt.Errorf( - "failed to parse resource address '%s': %s", - target.Addr, err) - } - - nodes = append(nodes, &graphNodeImportState{ - Addr: addr, - ID: target.ID, - Provider: target.Provider, - }) - } - - // Build the graph vertices - for _, n := range nodes { - g.Add(n) - } - - return nil -} - -type graphNodeImportState struct { - Addr *ResourceAddress // Addr is the resource address to import to - ID string // ID is the ID to import as - Provider string // Provider string - - states []*InstanceState -} - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) -} - -func (n *graphNodeImportState) ProvidedBy() []string { - return []string{resourceProvider(n.Addr.Type, n.Provider)} -} - -// GraphNodeSubPath -func (n *graphNodeImportState) Path() []string { - return normalizeModulePath(n.Addr.Path) -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportState) EvalTree() EvalNode { - var provider ResourceProvider - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name), - ModulePath: n.Path(), - Type: n.Addr.Type, - } - - // Reset our states - n.states = nil - - // Return our sequence - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalImportState{ - Provider: &provider, - Info: info, - Id: n.ID, - Output: &n.states, - }, - }, - } -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. - addrs := make([]*ResourceAddress, len(n.states)) - for i, state := range n.states { - addr := *n.Addr - if t := state.Ephemeral.Type; t != "" { - addr.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = &addr - } - - // Verify that all the addresses are clear - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - filter := &StateFilter{State: state} - for _, addr := range addrs { - result, err := filter.Filter(addr.String()) - if err != nil { - return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) - } - - // Go through the filter results and it is an error if we find - // a matching InstanceState, meaning that we would have a collision. 
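The nameCounter de-dup in DynamicExpand gives repeated import addresses a -N suffix, starting at -1 for the second occurrence. The same logic in isolation:

package main

import "fmt"

// dedupe suffixes repeated addresses exactly as the nameCounter loop
// does: first occurrence unchanged, then -1, -2, and so on.
func dedupe(addrs []string) []string {
	counter := make(map[string]int)
	out := make([]string, len(addrs))
	for i, a := range addrs {
		count, ok := counter[a]
		name := a
		if ok {
			count++
			name = fmt.Sprintf("%s-%d", a, count)
		}
		counter[a] = count
		out[i] = name
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"aws_instance.web", "aws_instance.web", "aws_instance.web"}))
	// [aws_instance.web aws_instance.web-1 aws_instance.web-2]
}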
- for _, r := range result { - if _, ok := r.Value.(*InstanceState); ok { - return nil, fmt.Errorf( - "Can't import %s, would collide with an existing resource.\n\n"+ - "Please remove or rename this resource before continuing.", - addr) - } - } - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own EvalTree with the result of - // ImportState. Since DynamicExpand is always called after EvalTree, this - // is safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - Target: addrs[i], - Path_: n.Path(), - State: state, - Provider: n.Provider, - }) - } - - // Root transform for a single root - t := &RootTransformer{} - if err := t.Transform(g); err != nil { - return nil, err - } - - // Done! - return g, nil -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. -type graphNodeImportStateSub struct { - Target *ResourceAddress - State *InstanceState - Path_ []string - Provider string -} - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) -} - -func (n *graphNodeImportStateSub) Path() []string { - return n.Path_ -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportStateSub) EvalTree() EvalNode { - // If the Ephemeral type isn't set, then it is an error - if n.State.Ephemeral.Type == "" { - err := fmt.Errorf( - "import of %s didn't set type for %s", - n.Target.String(), n.State.ID) - return &EvalReturnError{Error: &err} - } - - // DeepCopy so we're only modifying our local copy - state := n.State.DeepCopy() - - // Build the resource info - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name), - ModulePath: n.Path_, - Type: n.State.Ephemeral.Type, - } - - // Key is the resource key - key := &ResourceStateKey{ - Name: n.Target.Name, - Type: info.Type, - Index: n.Target.Index, - } - - // The eval sequence - var provider ResourceProvider - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: resourceProvider(info.Type, n.Provider), - Output: &provider, - }, - &EvalRefresh{ - Provider: &provider, - State: &state, - Info: info, - Output: &state, - }, - &EvalImportStateVerify{ - Info: info, - Id: n.State.ID, - State: &state, - }, - &EvalWriteState{ - Name: key.String(), - ResourceType: info.Type, - Provider: resourceProvider(info.Type, n.Provider), - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go deleted file mode 100644 index dbfd168..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// This only adds variables that are referenced by other things in the graph. -// If a module variable is not referenced, it won't be added to the graph. -type ModuleVariableTransformer struct { - Module *module.Tree - - DisablePrune bool // True if pruning unreferenced should be disabled - Input bool // True if this is from an Input operation. 
-} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Module) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { - // If no config, no variables - if m == nil { - return nil - } - - // Transform all the children. This must be done BEFORE the transform - // above since child module variables can reference parent module variables. - for _, c := range m.Children() { - if err := t.transform(g, m, c); err != nil { - return err - } - } - - // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, m); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { - // If we have no vars, we're done! - vars := m.Config().Variables - if len(vars) == 0 { - log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) - return nil - } - - // Look for usage of this module - var mod *config.Module - for _, modUse := range parent.Config().Modules { - if modUse.Name == m.Name() { - mod = modUse - break - } - } - if mod == nil { - log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) - return nil - } - - // Build the reference map so we can determine if we're referencing things. - refMap := NewReferenceMap(g.Vertices()) - - // Add all variables here - for _, v := range vars { - // Determine the value of the variable. If it isn't in the - // configuration then it was never set and that's not a problem. - var value *config.RawConfig - if raw, ok := mod.RawConfig.Raw[v.Name]; ok { - var err error - value, err = config.NewRawConfig(map[string]interface{}{ - v.Name: raw, - }) - if err != nil { - // This shouldn't happen because it is already in - // a RawConfig above meaning it worked once before. - panic(err) - } - } - - // Build the node. - // - // NOTE: For now this is just an "applyable" variable. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. - node := &NodeApplyableModuleVariable{ - PathValue: normalizeModulePath(m.Path()), - Config: v, - Value: value, - Module: t.Module, - Input: t.Input, - } - - if !t.DisablePrune { - // If the node is not referenced by anything, then we don't need - // to include it since it won't be used. - if matches := refMap.ReferencedBy(node); len(matches) == 0 { - log.Printf( - "[INFO] Not including %q in graph, nothing depends on it", - dag.VertexName(node)) - continue - } - } - - // Add it! - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go deleted file mode 100644 index b256a25..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go +++ /dev/null @@ -1,110 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" -) - -// OrphanResourceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. 
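Unless pruning is disabled, the transformer above only keeps a module variable when something already in the graph references it. A sketch of that filter, with a plain name-to-referrers index standing in for NewReferenceMap:

package main

import "fmt"

// prune keeps only the candidates that at least one existing node
// references, dropping the rest before they ever enter the graph.
func prune(candidates []string, referencedBy map[string][]string) []string {
	var kept []string
	for _, name := range candidates {
		if len(referencedBy[name]) == 0 {
			continue
		}
		kept = append(kept, name)
	}
	return kept
}

func main() {
	refs := map[string][]string{"var.region": {"aws_instance.web"}}
	fmt.Println(prune([]string{"var.region", "var.unused"}, refs))
	// [var.region]
}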
-type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc - - Count int // Actual count of the resource - Addr *ResourceAddress // Addr of the resource to look for orphans - State *State // Full global state -} - -func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] OrphanResourceCount: Starting...") - - // Grab the module in the state just for this resource address - ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) - if ms == nil { - // If no state, there can't be orphans - return nil - } - - orphanIndex := -1 - if t.Count == 1 { - orphanIndex = 0 - } - - // Go through the orphans and add them all to the state - for key, _ := range ms.Resources { - // Build the address - addr, err := parseResourceAddressInternal(key) - if err != nil { - return err - } - addr.Path = ms.Path[1:] - - // Copy the address for comparison. If we aren't looking at - // the same resource, then just ignore it. - addrCopy := addr.Copy() - addrCopy.Index = -1 - if !addrCopy.Equals(t.Addr) { - continue - } - - log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) - - idx := addr.Index - - // If we have zero and the index here is 0 or 1, then we - // change the index to a high number so that we treat it as - // an orphan. - if t.Count <= 0 && idx <= 0 { - idx = t.Count + 1 - } - - // If we have a count greater than 0 and we're at the zero index, - // we do a special case check to see if our state also has a - // -1 index value. If so, this is an orphan because our rules are - // that if both a -1 and 0 are in the state, the 0 is destroyed. - if t.Count > 0 && idx == orphanIndex { - // This is a piece of cleverness (beware), but its simple: - // if orphanIndex is 0, then check -1, else check 0. - checkIndex := (orphanIndex + 1) * -1 - - key := &ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Mode: addr.Mode, - Index: checkIndex, - } - - if _, ok := ms.Resources[key.String()]; ok { - // We have a -1 index, too. Make an arbitrarily high - // index so that we always mark this as an orphan. - log.Printf( - "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d", - addr, orphanIndex) - idx = t.Count + 1 - } - } - - // If the index is within the count bounds, it is not an orphan - if idx < t.Count { - continue - } - - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - // Add it to the graph - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go deleted file mode 100644 index 49568d5..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ /dev/null @@ -1,64 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. 
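Setting aside the -1 versus 0 tie-break (which the vendored code handles with orphanIndex and checkIndex), the core orphan rule above is a bounds test: a state index at or past the configured count is an orphan, and with the count removed everything is. A simplified sketch:

package main

import "fmt"

// isOrphan is a simplified version of the bounds test; it omits the
// special case where both "foo" and "foo.0" appear in the state.
func isOrphan(count, idx int) bool {
	if count <= 0 {
		return true // resource gone (or count 0): every entry is an orphan
	}
	return idx >= count // outside [0, count)
}

func main() {
	fmt.Println(isOrphan(2, 3)) // true:  foo.3 with count = 2
	fmt.Println(isOrphan(2, 1)) // false: foo.1 is still in bounds
}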
-type OrphanOutputTransformer struct { - Module *module.Tree // Root module - State *State // State is the root state -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - return t.transform(g, t.Module) -} - -func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error { - // Get our configuration, and recurse into children - var c *config.Config - if m != nil { - c = m.Config() - for _, child := range m.Children() { - if err := t.transform(g, child); err != nil { - return err - } - } - } - - // Get the state. If there is no state, then we have no orphans! - path := normalizeModulePath(m.Path()) - state := t.State.ModuleByPath(path) - if state == nil { - return nil - } - - // Make a map of the valid outputs - valid := make(map[string]struct{}) - for _, o := range c.Outputs { - valid[o.Name] = struct{}{} - } - - // Go through the outputs and find the ones that aren't in our config. - for n, _ := range state.Outputs { - // If it is in the valid map, then ignore - if _, ok := valid[n]; ok { - continue - } - - // Orphan! - g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go deleted file mode 100644 index e42d3c8..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,78 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// OrphanResourceTransformer is a GraphTransformer that adds resource -// orphans to the graph. A resource orphan is a resource that is -// represented in the state but not in the configuration. -// -// This only adds orphans that have no representation at all in the -// configuration. -type OrphanResourceTransformer struct { - Concrete ConcreteResourceNodeFunc - - // State is the global state. We require the global state to - // properly find module orphans at our path. - State *State - - // Module is the root module. We'll look up the proper configuration - // using the graph path. - Module *module.Tree -} - -func (t *OrphanResourceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - - // Go through the modules and for each module transform in order - // to add the orphan. - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - - return nil -} - -func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { - if ms == nil { - return nil - } - - // Get the configuration for this path. The configuration might be - // nil if the module was removed from the configuration. This is okay, - // this just means that every resource is an orphan. 
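The orphan detection above is a set difference: outputs present in state but absent from the config's valid set get deletion nodes. In isolation:

package main

import "fmt"

// orphanOutputs returns the state outputs with no config counterpart.
func orphanOutputs(configOutputs, stateOutputs []string) []string {
	valid := make(map[string]struct{}, len(configOutputs))
	for _, o := range configOutputs {
		valid[o] = struct{}{}
	}
	var orphans []string
	for _, n := range stateOutputs {
		if _, ok := valid[n]; !ok {
			orphans = append(orphans, n)
		}
	}
	return orphans
}

func main() {
	fmt.Println(orphanOutputs([]string{"url"}, []string{"url", "old_ip"}))
	// [old_ip]
}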
- var c *config.Config - if m := t.Module.Child(ms.Path[1:]); m != nil { - c = m.Config() - } - - // Go through the orphans and add them all to the state - for _, key := range ms.Orphans(c) { - // Build the abstract resource - addr, err := parseResourceAddressInternal(key) - if err != nil { - return err - } - addr.Path = ms.Path[1:] - - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - // Add it to the graph - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go deleted file mode 100644 index b260f4c..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ /dev/null @@ -1,59 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Module *module.Tree -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Module) -} - -func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { - // If no config, no outputs - if m == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. - for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { - return err - } - } - - // If we have no outputs, we're done! - os := m.Config().Outputs - if len(os) == 0 { - return nil - } - - // Add all outputs here - for _, o := range os { - // Build the node. - // - // NOTE: For now this is just an "applyable" output. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. - node := &NodeApplyableOutput{ - PathValue: normalizeModulePath(m.Path()), - Config: o, - } - - // Add it! - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go deleted file mode 100644 index b9695d5..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ /dev/null @@ -1,380 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. The ProviderName returned is the name of the provider -// they satisfy. -type GraphNodeProvider interface { - ProviderName() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. The CloseProviderName returned is the name of -// the provider they satisfy. -type GraphNodeCloseProvider interface { - CloseProviderName() string -} - -// GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the name of the provider -// to use. 
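The provider lookup this consumer interface feeds is, at heart, a map lookup per consumer plus an edge. A much-reduced sketch; the vendored code additionally keys by module path and accumulates a multierror instead of returning on the first miss:

package main

import "fmt"

// connectProviders pairs each consumer with its named provider, or
// reports which provider couldn't be found.
func connectProviders(consumers map[string]string, providers map[string]bool) error {
	for consumer, p := range consumers {
		if !providers[p] {
			return fmt.Errorf("%s: provider %s couldn't be found", consumer, p)
		}
		fmt.Printf("edge: %s -> provider.%s\n", consumer, p)
	}
	return nil
}

func main() {
	err := connectProviders(
		map[string]string{"aws_instance.web": "aws"},
		map[string]bool{"aws": true},
	)
	fmt.Println(err) // <nil>
}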
-type GraphNodeProviderConsumer interface { - ProvidedBy() []string -} - -// ProviderTransformer is a GraphTransformer that maps resources to -// providers within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProviderTransformer struct{} - -func (t *ProviderTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to providers they need - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - target := m[providerMapKey(p, pv)] - if target == nil { - println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv))) - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - g.Connect(dag.BasicEdge(v, target)) - } - } - } - - return err -} - -// CloseProviderTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provider connections that aren't needed anymore. -// A provider connection is not needed anymore once all depended resources -// in the graph are evaluated. -type CloseProviderTransformer struct{} - -func (t *CloseProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - cpm := closeProviderVertexMap(g) - var err error - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - key := p - source := cpm[key] - - if source == nil { - // Create a new graphNodeCloseProvider and add it to the graph - source = &graphNodeCloseProvider{ProviderNameValue: p} - g.Add(source) - - // Close node needs to depend on provider - provider, ok := pm[key] - if !ok { - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found for closing", - dag.VertexName(v), p)) - continue - } - g.Connect(dag.BasicEdge(source, provider)) - - // Make sure we also add the new graphNodeCloseProvider to the map - // so we don't create and add any duplicate graphNodeCloseProviders. - cpm[key] = source - } - - // Close node depends on all nodes provided by the provider - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds nodes -// for missing providers into the graph. Specifically, it creates provider -// configuration nodes for all the providers that we support. These are -// pruned later during an optimization pass. -type MissingProviderTransformer struct { - // Providers is the list of providers we support. - Providers []string - - // AllowAny will not check that a provider is supported before adding - // it to the graph. - AllowAny bool - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - // Create a set of our supported providers - supported := make(map[string]struct{}, len(t.Providers)) - for _, v := range t.Providers { - supported[v] = struct{}{} - } - - // Get the map of providers we already have in our graph - m := providerVertexMap(g) - - // Go through all the provider consumers and make sure we add - // that provider if it is missing. We use a for loop here instead - // of "range" since we'll modify check as we go to add more to check. 
- check := g.Vertices() - for i := 0; i < len(check); i++ { - v := check[i] - - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - var path []string - if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } - } - - for _, p := range pv.ProvidedBy() { - key := providerMapKey(p, pv) - if _, ok := m[key]; ok { - // This provider already exists as a configure node - continue - } - - // If the provider has an alias in it, we just want the type - ptype := p - if idx := strings.IndexRune(p, '.'); idx != -1 { - ptype = p[:idx] - } - - if !t.AllowAny { - if _, ok := supported[ptype]; !ok { - // If we don't support the provider type, skip it. - // Validation later will catch this as an error. - continue - } - } - - // Add the missing provider node to the graph - v := t.Concrete(&NodeAbstractProvider{ - NameValue: p, - PathValue: path, - }).(dag.Vertex) - if len(path) > 0 { - // We'll need the parent provider as well, so let's - // add a dummy node to check to make sure that we add - // that parent provider. - check = append(check, &graphNodeProviderConsumerDummy{ - ProviderValue: p, - PathValue: path[:len(path)-1], - }) - } - - m[key] = g.Add(v) - } - } - - return nil -} - -// ParentProviderTransformer connects provider nodes to their parents. -// -// This works by finding nodes that are both GraphNodeProviders and -// GraphNodeSubPath. It then connects the providers to their parent -// path. -type ParentProviderTransformer struct{} - -func (t *ParentProviderTransformer) Transform(g *Graph) error { - // Make a mapping of path to dag.Vertex, where path is: "path.name" - m := make(map[string]dag.Vertex) - - // Also create a map that maps a provider to its parent - parentMap := make(map[dag.Vertex]string) - for _, raw := range g.Vertices() { - // If it is the flat version, then make it the non-flat version. - // We eventually want to get rid of the flat version entirely so - // this is a stop-gap while it still exists. - var v dag.Vertex = raw - - // Only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { - continue - } - - // Also require a subpath, if there is no subpath then we - // just totally ignore it. The expectation of this transform is - // that it is used with a graph builder that is already flattened. - var path []string - if pn, ok := raw.(GraphNodeSubPath); ok { - path = pn.Path() - } - path = normalizeModulePath(path) - - // Build the key with path.name i.e. "child.subchild.aws" - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - m[key] = raw - - // Determine the parent if we're non-root. This is length 1 since - // the 0 index should be "root" since we normalize above. - if len(path) > 1 { - path = path[:len(path)-1] - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - parentMap[raw] = key - } - } - - // Connect! - for v, key := range parentMap { - if parent, ok := m[key]; ok { - g.Connect(dag.BasicEdge(v, parent)) - } - } - - return nil -} - -// PruneProviderTransformer is a GraphTransformer that prunes all the -// providers that aren't needed from the graph. A provider is unneeded if -// no resource or module is using that provider. 
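// A standalone sketch of the "path.name" keying scheme used above, assuming
// a normalized module path whose first element is "root": a provider's
// parent key is derived by dropping the last path element before joining.
package main

import (
	"fmt"
	"strings"
)

func providerKey(path []string, name string) string {
	return fmt.Sprintf("%s.%s", strings.Join(path, "."), name)
}

func main() {
	path := []string{"root", "child", "subchild"}
	fmt.Println(providerKey(path, "aws"))               // root.child.subchild.aws
	fmt.Println(providerKey(path[:len(path)-1], "aws")) // parent: root.child.aws
}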
-type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about the providers - if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { - continue - } - // Does anything depend on this? If not, then prune it. - if s := g.UpEdges(v); s.Len() == 0 { - if nv, ok := v.(dag.NamedVertex); ok { - log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) - } - g.Remove(v) - } - } - - return nil -} - -// providerMapKey is a helper that gives us the key to use for the -// maps returned by things such as providerVertexMap. -func providerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - pathPrefix = modulePrefixStr(raw) + "." - } - } - - return pathPrefix + k -} - -func providerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - key := providerMapKey(pv.ProviderName(), v) - m[key] = v - } - } - - return m -} - -func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvider); ok { - m[pv.CloseProviderName()] = v - } - } - - return m -} - -type graphNodeCloseProvider struct { - ProviderNameValue string -} - -func (n *graphNodeCloseProvider) Name() string { - return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.ProviderNameValue) -} - -// GraphNodeDependable impl. -func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *graphNodeCloseProvider) CloseProviderName() string { - return n.ProviderNameValue -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// RemovableIfNotTargeted -func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// graphNodeProviderConsumerDummy is a struct that never enters the real -// graph (though it could to no ill effect). It implements -// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force -// certain transformations. -type graphNodeProviderConsumerDummy struct { - ProviderValue string - PathValue []string -} - -func (n *graphNodeProviderConsumerDummy) Path() []string { - return n.PathValue -} - -func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { - return []string{n.ProviderValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go deleted file mode 100644 index d9919f3..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go +++ /dev/null @@ -1,50 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// DisableProviderTransformer "disables" any providers that are not actually -// used by anything. 
This avoids the provider being initialized and configured. -// This both saves resources but also avoids errors since configuration -// may imply initialization which may require auth. -type DisableProviderTransformer struct{} - -func (t *DisableProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { - continue - } - - // If we have dependencies, then don't disable - if g.UpEdges(v).Len() > 0 { - continue - } - - // Get the path - var path []string - if pn, ok := v.(GraphNodeSubPath); ok { - path = pn.Path() - } - - // Disable the provider by replacing it with a "disabled" provider - disabled := &NodeDisabledProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - NameValue: pn.ProviderName(), - PathValue: path, - }, - } - - if !g.Replace(v, disabled) { - panic(fmt.Sprintf( - "vertex disappeared from under us: %s", - dag.VertexName(v))) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go deleted file mode 100644 index f49d824..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ /dev/null @@ -1,206 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. -type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the name of the -// provisioner to use. -type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - key := provisionerMapKey(p, pv) - if m[key] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - g.Connect(dag.BasicEdge(v, m[key])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. 
- Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - var path []string - if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } - } - - for _, p := range pv.ProvisionedBy() { - // Build the key for storing in the map - key := provisionerMapKey(p, pv) - - if _, ok := m[key]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - PathValue: path, - } - - // Add the missing provisioner node to the graph - m[key] = g.Add(newV) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. -type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -// provisionerMapKey is a helper that gives us the key to use for the -// maps returned by things such as provisionerVertexMap. -func provisionerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - pathPrefix = modulePrefixStr(raw) + "." 
- } - } - - return pathPrefix + k -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - key := provisionerMapKey(pv.ProvisionerName(), v) - m[key] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { - return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go deleted file mode 100644 index c545235..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ /dev/null @@ -1,321 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - // ReferenceableName is the name by which this can be referenced. - // This can be either just the type, or include the field. Example: - // "aws_instance.bar" or "aws_instance.bar.id". - ReferenceableName() []string -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - // References are the list of things that this node references. This - // can include fields or just the type, just like GraphNodeReferenceable - // above. - References() []string -} - -// GraphNodeReferenceGlobal is an interface that can optionally be -// implemented. If ReferenceGlobal returns true, then the References() -// and ReferenceableName() must be _fully qualified_ with "module.foo.bar" -// etc. -// -// This allows a node to reference and be referenced by a specific name -// that may cross module boundaries. This can be very dangerous so use -// this wisely. -// -// The primary use case for this is module boundaries (variables coming in). -type GraphNodeReferenceGlobal interface { - // Set to true to signal that references and name are fully - // qualified. See the above docs for more information. - ReferenceGlobal() bool -} - -// ReferenceTransformer is a GraphTransformer that connects all the -// nodes that reference each other in order to form the proper ordering. 
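// A standalone sketch of the reference wiring described above, assuming
// vertices are identified by the names they declare: a lookup table maps
// each referenceable name to its vertex, and every reference then becomes
// a dependency edge. The vertex type is local to this sketch.
package main

import "fmt"

type vertex struct {
	name string   // referenceable name, e.g. "aws_instance.bar"
	refs []string // names this vertex references
}

func main() {
	vs := []*vertex{
		{name: "aws_instance.bar"},
		{name: "aws_eip.ip", refs: []string{"aws_instance.bar"}},
	}
	byName := make(map[string]*vertex)
	for _, v := range vs {
		byName[v.name] = v
	}
	for _, v := range vs {
		for _, r := range v.refs {
			if target, ok := byName[r]; ok {
				fmt.Printf("edge: %s -> %s\n", v.name, target.name)
			}
		}
	}
}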
-type ReferenceTransformer struct{} - -func (t *ReferenceTransformer) Transform(g *Graph) error { - // Build a reference map so we can efficiently look up the references - vs := g.Vertices() - m := NewReferenceMap(vs) - - // Find the things that reference things and connect them - for _, v := range vs { - parents, _ := m.References(v) - parentsDbg := make([]string, len(parents)) - for i, v := range parents { - parentsDbg[i] = dag.VertexName(v) - } - log.Printf( - "[DEBUG] ReferenceTransformer: %q references: %v", - dag.VertexName(v), parentsDbg) - - for _, parent := range parents { - g.Connect(dag.BasicEdge(v, parent)) - } - } - - return nil -} - -// ReferenceMap is a structure that can be used to efficiently check -// for references on a graph. -type ReferenceMap struct { - // m is the mapping of referenceable name to list of verticies that - // implement that name. This is built on initialization. - references map[string][]dag.Vertex - referencedBy map[string][]dag.Vertex -} - -// References returns the list of vertices that this vertex -// references along with any missing references. -func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { - rn, ok := v.(GraphNodeReferencer) - if !ok { - return nil, nil - } - - var matches []dag.Vertex - var missing []string - prefix := m.prefix(v) - for _, ns := range rn.References() { - found := false - for _, n := range strings.Split(ns, "/") { - n = prefix + n - parents, ok := m.references[n] - if !ok { - continue - } - - // Mark that we found a match - found = true - - // Make sure this isn't a self reference, which isn't included - selfRef := false - for _, p := range parents { - if p == v { - selfRef = true - break - } - } - if selfRef { - continue - } - - matches = append(matches, parents...) - break - } - - if !found { - missing = append(missing, ns) - } - } - - return matches, missing -} - -// ReferencedBy returns the list of vertices that reference the -// vertex passed in. -func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex { - rn, ok := v.(GraphNodeReferenceable) - if !ok { - return nil - } - - var matches []dag.Vertex - prefix := m.prefix(v) - for _, n := range rn.ReferenceableName() { - n = prefix + n - children, ok := m.referencedBy[n] - if !ok { - continue - } - - // Make sure this isn't a self reference, which isn't included - selfRef := false - for _, p := range children { - if p == v { - selfRef = true - break - } - } - if selfRef { - continue - } - - matches = append(matches, children...) - } - - return matches -} - -func (m *ReferenceMap) prefix(v dag.Vertex) string { - // If the node is stating it is already fully qualified then - // we don't have to create the prefix! - if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { - return "" - } - - // Create the prefix based on the path - var prefix string - if pn, ok := v.(GraphNodeSubPath); ok { - if path := normalizeModulePath(pn.Path()); len(path) > 1 { - prefix = modulePrefixStr(path) + "." - } - } - - return prefix -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. 
-func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { - var m ReferenceMap - - // Build the lookup table - refMap := make(map[string][]dag.Vertex) - for _, v := range vs { - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.ReferenceableName() { - n = prefix + n - refMap[n] = append(refMap[n], v) - } - - // If there is a path, it is always referenceable by that. For - // example, if this is a referenceable thing at path []string{"foo"}, - // then it can be referenced at "module.foo" - if pn, ok := v.(GraphNodeSubPath); ok { - for _, p := range ReferenceModulePath(pn.Path()) { - refMap[p] = append(refMap[p], v) - } - } - } - - // Build the lookup table for referenced by - refByMap := make(map[string][]dag.Vertex) - for _, v := range vs { - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferencer) - if !ok { - continue - } - - // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.References() { - n = prefix + n - refByMap[n] = append(refByMap[n], v) - } - } - - m.references = refMap - m.referencedBy = refByMap - return &m -} - -// Returns the reference name for a module path. The path "foo" would return -// "module.foo". If this is a deeply nested module, it will be every parent -// as well. For example: ["foo", "bar"] would return both "module.foo" and -// "module.foo.module.bar" -func ReferenceModulePath(p []string) []string { - p = normalizeModulePath(p) - if len(p) == 1 { - // Root, no name - return nil - } - - result := make([]string, 0, len(p)-1) - for i := len(p); i > 1; i-- { - result = append(result, modulePrefixStr(p[:i])) - } - - return result -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. -func ReferencesFromConfig(c *config.RawConfig) []string { - var result []string - for _, v := range c.Variables { - if r := ReferenceFromInterpolatedVar(v); len(r) > 0 { - result = append(result, r...) - } - } - - return result -} - -// ReferenceFromInterpolatedVar returns the reference from this variable, -// or an empty string if there is no reference. -func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string { - switch v := v.(type) { - case *config.ModuleVariable: - return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)} - case *config.ResourceVariable: - id := v.ResourceId() - - // If we have a multi-reference (splat), then we depend on ALL - // resources with this type/name. - if v.Multi && v.Index == -1 { - return []string{fmt.Sprintf("%s.*", id)} - } - - // Otherwise, we depend on a specific index. - idx := v.Index - if !v.Multi || v.Index == -1 { - idx = 0 - } - - // Depend on the index, as well as "N" which represents the - // un-expanded set of resources. 
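// A standalone sketch of the module-path expansion performed by
// ReferenceModulePath above, assuming the path is already normalized so
// that index 0 is "root": every ancestor module becomes referenceable,
// e.g. ["root", "foo", "bar"] yields "module.foo" and
// "module.foo.module.bar".
package main

import (
	"fmt"
	"strings"
)

func modulePrefix(p []string) string {
	parts := make([]string, 0, (len(p)-1)*2)
	for _, name := range p[1:] { // skip the leading "root"
		parts = append(parts, "module", name)
	}
	return strings.Join(parts, ".")
}

func main() {
	p := []string{"root", "foo", "bar"}
	for i := len(p); i > 1; i-- {
		fmt.Println(modulePrefix(p[:i]))
	}
	// Output:
	// module.foo.module.bar
	// module.foo
}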
- return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)} - case *config.UserVariable: - return []string{fmt.Sprintf("var.%s", v.Name)} - default: - return nil - } -} - -func modulePrefixStr(p []string) string { - parts := make([]string, 0, len(p)*2) - for _, p := range p[1:] { - parts = append(parts, "module", p) - } - - return strings.Join(parts, ".") -} - -func modulePrefixList(result []string, prefix string) []string { - if prefix != "" { - for i, v := range result { - result[i] = fmt.Sprintf("%s.%s", prefix, v) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go deleted file mode 100644 index cda35cb..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ /dev/null @@ -1,51 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. -type ResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc - - Count int - Addr *ResourceAddress -} - -func (t *ResourceCountTransformer) Transform(g *Graph) error { - // Don't allow the count to be negative - if t.Count < 0 { - return fmt.Errorf("negative count: %d", t.Count) - } - - // For each count, build and add the node - for i := 0; i < t.Count; i++ { - // Set the index. If our count is 1 we special case it so that - // we handle the "resource.0" and "resource" boundary properly. - index := i - if t.Count == 1 { - index = -1 - } - - // Build the resource address - addr := t.Addr.Copy() - addr.Index = index - - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - // Add it to the graph - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go deleted file mode 100644 index aee053d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/dag" - -const rootNodeName = "root" - -// RootTransformer is a GraphTransformer that adds a root to the graph. -type RootTransformer struct{} - -func (t *RootTransformer) Transform(g *Graph) error { - // If we already have a good root, we're done - if _, err := g.Root(); err == nil { - return nil - } - - // Add a root - var root graphNodeRoot - g.Add(root) - - // Connect the root to all the edges that need it - for _, v := range g.Vertices() { - if v == root { - continue - } - - if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(root, v)) - } - } - - return nil -} - -type graphNodeRoot struct{} - -func (n graphNodeRoot) Name() string { - return rootNodeName -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go deleted file mode 100644 index 471cd74..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go +++ /dev/null @@ -1,65 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/dag" -) - -// StateTransformer is a GraphTransformer that adds the elements of -// the state to the graph. 
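// A standalone sketch of the count expansion described above, with an
// address rendered as a plain "name[index]" string: a count of 1 collapses
// to index -1 so a single instance matches both the "resource" and
// "resource.0" forms.
package main

import "fmt"

func expand(name string, count int) []string {
	out := make([]string, 0, count)
	for i := 0; i < count; i++ {
		index := i
		if count == 1 {
			index = -1 // special case: a single instance carries no index
		}
		out = append(out, fmt.Sprintf("%s[%d]", name, index))
	}
	return out
}

func main() {
	fmt.Println(expand("aws_instance.web", 1)) // [aws_instance.web[-1]]
	fmt.Println(expand("aws_instance.web", 3)) // three indexed nodes
}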
-// -// This transform is used for example by the DestroyPlanGraphBuilder to ensure -// that only resources that are in the state are represented in the graph. -type StateTransformer struct { - Concrete ConcreteResourceNodeFunc - - State *State -} - -func (t *StateTransformer) Transform(g *Graph) error { - // If the state is nil or empty (nil is empty) then do nothing - if t.State.Empty() { - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] StateTransformer: starting") - var nodes []dag.Vertex - for _, ms := range t.State.Modules { - log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) - - // Go through all the resources in this module. - for name, rs := range ms.Resources { - log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs) - - // Add the resource to the graph - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } - - // Very important: add the module path for this resource to - // the address. Remove "root" from it. - addr.Path = ms.Path[1:] - - // Add the resource to the graph - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - nodes = append(nodes, node) - } - } - - // Add all the nodes to the graph - for _, n := range nodes { - g.Add(n) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go deleted file mode 100644 index 4f117b4..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go +++ /dev/null @@ -1,230 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeTargetable is an interface for graph nodes to implement when they -// need to be told about incoming targets. This is useful for nodes that need -// to respect targets as they dynamically expand. Note that the list of targets -// provided will contain every target provided, and each implementing graph -// node must filter this list to targets considered relevant. -type GraphNodeTargetable interface { - SetTargets([]ResourceAddress) -} - -// GraphNodeTargetDownstream is an interface for graph nodes that need to -// be remain present under targeting if any of their dependencies are targeted. -// TargetDownstream is called with the set of vertices that are direct -// dependencies for the node, and it should return true if the node must remain -// in the graph in support of those dependencies. -// -// This is used in situations where the dependency edges are representing an -// ordering relationship but the dependency must still be visited if its -// dependencies are visited. This is true for outputs, for example, since -// they must get updated if any of their dependent resources get updated, -// which would not normally be true if one of their dependencies were targeted. -type GraphNodeTargetDownstream interface { - TargetDownstream(targeted, untargeted *dag.Set) bool -} - -// TargetsTransformer is a GraphTransformer that, when the user specifies a -// list of resources to target, limits the graph to only those resources and -// their dependencies. 
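// A standalone sketch of the walk-direction flip used when selecting
// targeted nodes below, with a toy graph stored as child -> parents (the
// things a node depends on): a normal run keeps everything a target depends
// on (ancestors), while destroy keeps everything that depends on the target
// (descendants). The graph and names are illustrative only.
package main

import "fmt"

var dependsOn = map[string][]string{
	"aws_eip.ip":       {"aws_instance.web"},
	"aws_instance.web": {"provider.aws"},
}

func ancestors(n string) []string { // what n needs
	var out []string
	for _, p := range dependsOn[n] {
		out = append(out, p)
		out = append(out, ancestors(p)...)
	}
	return out
}

func descendants(n string) []string { // what needs n
	var out []string
	for child, parents := range dependsOn {
		for _, p := range parents {
			if p == n {
				out = append(out, child)
				out = append(out, descendants(child)...)
			}
		}
	}
	return out
}

func main() {
	fmt.Println(ancestors("aws_eip.ip"))         // kept when targeting aws_eip.ip
	fmt.Println(descendants("aws_instance.web")) // kept when destroying aws_instance.web
}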
-type TargetsTransformer struct { - // List of targeted resource names specified by the user - Targets []string - - // List of parsed targets, provided by callers like ResourceCountTransform - // that already have the targets parsed - ParsedTargets []ResourceAddress - - // If set, the index portions of resource addresses will be ignored - // for comparison. This is used when transforming a graph where - // counted resources have not yet been expanded, since otherwise - // the unexpanded nodes (which never have indices) would not match. - IgnoreIndices bool - - // Set to true when we're in a `terraform destroy` or a - // `terraform plan -destroy` - Destroy bool -} - -func (t *TargetsTransformer) Transform(g *Graph) error { - if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 { - addrs, err := t.parseTargetAddresses() - if err != nil { - return err - } - - t.ParsedTargets = addrs - } - - if len(t.ParsedTargets) > 0 { - targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets) - if err != nil { - return err - } - - for _, v := range g.Vertices() { - removable := false - if _, ok := v.(GraphNodeResource); ok { - removable = true - } - if vr, ok := v.(RemovableIfNotTargeted); ok { - removable = vr.RemoveIfNotTargeted() - } - if removable && !targetedNodes.Include(v) { - log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) - g.Remove(v) - } - } - } - - return nil -} - -func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { - addrs := make([]ResourceAddress, len(t.Targets)) - for i, target := range t.Targets { - ta, err := ParseResourceAddress(target) - if err != nil { - return nil, err - } - addrs[i] = *ta - } - - return addrs, nil -} - -// Returns the list of targeted nodes. A targeted node is either addressed -// directly, or is an Ancestor of a targeted node. Destroy mode keeps -// Descendents instead of Ancestors. -func (t *TargetsTransformer) selectTargetedNodes( - g *Graph, addrs []ResourceAddress) (*dag.Set, error) { - targetedNodes := new(dag.Set) - - vertices := g.Vertices() - - for _, v := range vertices { - if t.nodeIsTarget(v, addrs) { - targetedNodes.Add(v) - - // We inform nodes that ask about the list of targets - helps for nodes - // that need to dynamically expand. Note that this only occurs for nodes - // that are already directly targeted. - if tn, ok := v.(GraphNodeTargetable); ok { - tn.SetTargets(addrs) - } - - var deps *dag.Set - var err error - if t.Destroy { - deps, err = g.Descendents(v) - } else { - deps, err = g.Ancestors(v) - } - if err != nil { - return nil, err - } - - for _, d := range deps.List() { - targetedNodes.Add(d) - } - } - } - - // Handle nodes that need to be included if their dependencies are included. - // This requires multiple passes since we need to catch transitive - // dependencies if and only if they are via other nodes that also - // support TargetDownstream. For example: - // output -> output -> targeted-resource: both outputs need to be targeted - // output -> non-targeted-resource -> targeted-resource: output not targeted - // - // We'll keep looping until we stop targeting more nodes. - queue := targetedNodes.List() - for len(queue) > 0 { - vertices := queue - queue = nil // ready to append for next iteration if neccessary - for _, v := range vertices { - dependers := g.UpEdges(v) - if dependers == nil { - // indicates that there are no up edges for this node, so - // we have nothing to do here. 
- continue - } - - dependers = dependers.Filter(func(dv interface{}) bool { - // Can ignore nodes that are already targeted - /*if targetedNodes.Include(dv) { - return false - }*/ - - _, ok := dv.(GraphNodeTargetDownstream) - return ok - }) - - if dependers.Len() == 0 { - continue - } - - for _, dv := range dependers.List() { - if targetedNodes.Include(dv) { - // Already present, so nothing to do - continue - } - - // We'll give the node some information about what it's - // depending on in case that informs its decision about whether - // it is safe to be targeted. - deps := g.DownEdges(v) - depsTargeted := deps.Intersection(targetedNodes) - depsUntargeted := deps.Difference(depsTargeted) - - if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { - targetedNodes.Add(dv) - // Need to visit this node on the next pass to see if it - // has any transitive dependers. - queue = append(queue, dv) - } - } - } - } - - return targetedNodes, nil -} - -func (t *TargetsTransformer) nodeIsTarget( - v dag.Vertex, addrs []ResourceAddress) bool { - r, ok := v.(GraphNodeResource) - if !ok { - return false - } - - addr := r.ResourceAddr() - for _, targetAddr := range addrs { - if t.IgnoreIndices { - // targetAddr is not a pointer, so we can safely mutate it without - // interfering with references elsewhere. - targetAddr.Index = -1 - } - if targetAddr.Contains(addr) { - return true - } - } - - return false -} - -// RemovableIfNotTargeted is a special interface for graph nodes that -// aren't directly addressable, but need to be removed from the graph when they -// are not targeted. (Nodes that are not directly targeted end up in the set of -// targeted nodes because something that _is_ targeted depends on them.) The -// initial use case for this interface is GraphNodeConfigVariable, which was -// having trouble interpolating for module variables in targeted scenarios that -// filtered out the resource node being referenced. -type RemovableIfNotTargeted interface { - RemoveIfNotTargeted() bool -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go deleted file mode 100644 index 2184278..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// TransitiveReductionTransformer is a GraphTransformer that performs -// finds the transitive reduction of the graph. For a definition of -// transitive reduction, see Wikipedia. -type TransitiveReductionTransformer struct{} - -func (t *TransitiveReductionTransformer) Transform(g *Graph) error { - // If the graph isn't valid, skip the transitive reduction. - // We don't error here because Terraform itself handles graph - // validation in a better way, or we assume it does. - if err := g.Validate(); err != nil { - return nil - } - - // Do it - g.TransitiveReduction() - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go deleted file mode 100644 index b31e2c7..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/config/module" -) - -// RootVariableTransformer is a GraphTransformer that adds all the root -// variables to the graph. 
-// -// Root variables are currently no-ops but they must be added to the -// graph since downstream things that depend on them must be able to -// reach them. -type RootVariableTransformer struct { - Module *module.Tree -} - -func (t *RootVariableTransformer) Transform(g *Graph) error { - // If no config, no variables - if t.Module == nil { - return nil - } - - // If we have no vars, we're done! - vars := t.Module.Config().Variables - if len(vars) == 0 { - return nil - } - - // Add all variables here - for _, v := range vars { - node := &NodeRootVariable{ - Config: v, - } - - // Add it! - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go deleted file mode 100644 index 6b1293f..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// VertexTransformer is a GraphTransformer that transforms vertices -// using the GraphVertexTransformers. The Transforms are run in sequential -// order. If a transform replaces a vertex then the next transform will see -// the new vertex. -type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go deleted file mode 100644 index 7c87459..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go +++ /dev/null @@ -1,26 +0,0 @@ -package terraform - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. -type UIInput interface { - Input(*InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go deleted file mode 100644 index e3a07ef..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go +++ /dev/null @@ -1,23 +0,0 @@ -package terraform - -// MockUIInput is an implementation of UIInput that can be used for tests. 
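// A standalone sketch of one possible UIInput implementation, assuming only
// the interface and InputOpts shapes shown above: it prints the query,
// reads one line from stdin, and falls back to the declared default on
// empty input. The stdinInput type is hypothetical, not part of the package.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

type InputOpts struct {
	Id, Query, Description, Default string
}

type UIInput interface {
	Input(*InputOpts) (string, error)
}

type stdinInput struct{ r *bufio.Reader }

func (s *stdinInput) Input(opts *InputOpts) (string, error) {
	fmt.Printf("%s [%s]: ", opts.Query, opts.Default)
	line, err := s.r.ReadString('\n')
	if err != nil {
		return "", err
	}
	if line = strings.TrimSpace(line); line == "" {
		return opts.Default, nil // no answer given, use the default
	}
	return line, nil
}

func main() {
	var in UIInput = &stdinInput{r: bufio.NewReader(os.Stdin)}
	v, err := in.Input(&InputOpts{Id: "var.region", Query: "Enter region", Default: "us-east-1"})
	fmt.Println(v, err)
}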
-type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go deleted file mode 100644 index 2207d1d..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. -type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(opts) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go deleted file mode 100644 index 84427c6..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. -type UIOutput interface { - Output(string) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go deleted file mode 100644 index 7852bc4..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ /dev/null @@ -1,16 +0,0 @@ -package terraform - -// MockUIOutput is an implementation of UIOutput that can be used for tests. -type MockUIOutput struct { - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go deleted file mode 100644 index 878a031..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,15 +0,0 @@ -package terraform - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. 
-type ProvisionerUIOutput struct { - Info *InstanceInfo - Type string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.Info, o.Type, msg) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go deleted file mode 100644 index 700be2a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go +++ /dev/null @@ -1,14 +0,0 @@ -package terraform - -import ( - "fmt" - "runtime" -) - -// The standard Terraform User-Agent format -const UserAgent = "Terraform %s (%s)" - -// Generate a UserAgent string -func UserAgentString() string { - return fmt.Sprintf(UserAgent, VersionString(), runtime.Version()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go deleted file mode 100644 index 752241a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/util.go +++ /dev/null @@ -1,81 +0,0 @@ -package terraform - -import ( - "sort" - - "github.com/hashicorp/terraform/config" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n == 0 { - panic("semaphore with limit 0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. -func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -func resourceProvider(resourceType, explicitProvider string) string { - return config.ResourceProviderFullName(resourceType, explicitProvider) -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go deleted file mode 100644 index 300f2ad..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/variables.go +++ /dev/null @@ -1,166 +0,0 @@ -package terraform - -import ( - "fmt" - "os" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/hilmapstructure" -) - -// Variables returns the fully loaded set of variables to use with -// ContextOpts and NewContext, loading any additional variables from -// the environment or any other sources. 
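// A standalone sketch of the channel-as-semaphore pattern from util.go
// above: N buffered tokens bound how many goroutines may hold a slot at
// once. The vendored version also panics on release-without-acquire; this
// sketch keeps only the happy path.
package main

import (
	"fmt"
	"sync"
)

type Semaphore chan struct{}

func (s Semaphore) Acquire() { s <- struct{}{} }
func (s Semaphore) Release() { <-s }

func main() {
	sem := make(Semaphore, 2) // at most 2 workers run concurrently
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("working:", i)
		}(i)
	}
	wg.Wait()
}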
-// -// The given module tree doesn't need to be loaded. -func Variables( - m *module.Tree, - override map[string]interface{}) (map[string]interface{}, error) { - result := make(map[string]interface{}) - - // Variables are loaded in the following sequence. Each additional step - // will override conflicting variable keys from prior steps: - // - // * Take default values from config - // * Take values from TF_VAR_x env vars - // * Take values specified in the "override" param which is usually - // from -var, -var-file, etc. - // - - // First load from the config - for _, v := range m.Config().Variables { - // If the var has no default, ignore - if v.Default == nil { - continue - } - - // If the type isn't a string, we use it as-is since it is a rich type - if v.Type() != config.VariableTypeString { - result[v.Name] = v.Default - continue - } - - // v.Default has already been parsed as HCL but it may be an int type - switch typedDefault := v.Default.(type) { - case string: - if typedDefault == "" { - continue - } - result[v.Name] = typedDefault - case int, int64: - result[v.Name] = fmt.Sprintf("%d", typedDefault) - case float32, float64: - result[v.Name] = fmt.Sprintf("%f", typedDefault) - case bool: - result[v.Name] = fmt.Sprintf("%t", typedDefault) - default: - panic(fmt.Sprintf( - "Unknown default var type: %T\n\n"+ - "THIS IS A BUG. Please report it.", - v.Default)) - } - } - - // Load from env vars - for _, v := range os.Environ() { - if !strings.HasPrefix(v, VarEnvPrefix) { - continue - } - - // Strip off the prefix and get the value after the first "=" - idx := strings.Index(v, "=") - k := v[len(VarEnvPrefix):idx] - v = v[idx+1:] - - // Override the configuration-default values. Note that *not* finding the variable - // in configuration is OK, as we don't want to preclude people from having multiple - // sets of TF_VAR_whatever in their environment even if it is a little weird. - for _, schema := range m.Config().Variables { - if schema.Name != k { - continue - } - - varType := schema.Type() - varVal, err := parseVariableAsHCL(k, v, varType) - if err != nil { - return nil, err - } - - switch varType { - case config.VariableTypeMap: - if err := varSetMap(result, k, varVal); err != nil { - return nil, err - } - default: - result[k] = varVal - } - } - } - - // Load from overrides - for k, v := range override { - for _, schema := range m.Config().Variables { - if schema.Name != k { - continue - } - - switch schema.Type() { - case config.VariableTypeList: - result[k] = v - case config.VariableTypeMap: - if err := varSetMap(result, k, v); err != nil { - return nil, err - } - case config.VariableTypeString: - // Convert to a string and set. We don't catch any errors - // here because the validation step later should catch - // any type errors. - var strVal string - if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { - result[k] = strVal - } else { - result[k] = v - } - default: - panic(fmt.Sprintf( - "Unhandled var type: %T\n\n"+ - "THIS IS A BUG. Please report it.", - schema.Type())) - } - } - } - - return result, nil -} - -// varSetMap sets or merges the map in "v" with the key "k" in the -// "current" set of variables. 
This is just a private function to remove -// duplicate logic in Variables -func varSetMap(current map[string]interface{}, k string, v interface{}) error { - existing, ok := current[k] - if !ok { - current[k] = v - return nil - } - - existingMap, ok := existing.(map[string]interface{}) - if !ok { - panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) - } - - switch typedV := v.(type) { - case []map[string]interface{}: - for newKey, newVal := range typedV[0] { - existingMap[newKey] = newVal - } - case map[string]interface{}: - for newKey, newVal := range typedV { - existingMap[newKey] = newVal - } - default: - return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go deleted file mode 100644 index de9296a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ /dev/null @@ -1,31 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -const Version = "0.10.2" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var VersionPrerelease = "" - -// SemVersion is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVersion = version.Must(version.NewVersion(Version)) - -// VersionHeader is the header name used to send the current terraform version -// in http requests. -const VersionHeader = "Terraform-Version" - -func VersionString() string { - if VersionPrerelease != "" { - return fmt.Sprintf("%s-%s", Version, VersionPrerelease) - } - return Version -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go deleted file mode 100644 index 3cbbf56..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ /dev/null @@ -1,69 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" -) - -// checkRequiredVersion verifies that any version requirements specified by -// the configuration are met. -// -// This checks the root module as well as any additional version requirements -// from child modules. -// -// This is tested in context_test.go. -func checkRequiredVersion(m *module.Tree) error { - // Check any children - for _, c := range m.Children() { - if err := checkRequiredVersion(c); err != nil { - return err - } - } - - var tf *config.Terraform - if c := m.Config(); c != nil { - tf = c.Terraform - } - - // If there is no Terraform config or the required version isn't set, - // we move on. 
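// A minimal sketch of the required_version check performed below, using the
// same github.com/hashicorp/go-version API that the vendored code imports:
// parse the constraint string, then test the running version against it.
// The constraint and version strings are illustrative.
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	running := version.Must(version.NewVersion("0.10.2"))
	cs, err := version.NewConstraint(">= 0.10.0, < 0.11.0")
	if err != nil {
		fmt.Println("constraint syntax error:", err)
		return
	}
	fmt.Println(cs.Check(running)) // true: 0.10.2 satisfies the constraint
}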
- if tf == nil || tf.RequiredVersion == "" { - return nil - } - - // Path for errors - module := "root" - if path := normalizeModulePath(m.Path()); len(path) > 1 { - module = modulePrefixStr(path) - } - - // Check this version requirement of this module - cs, err := version.NewConstraint(tf.RequiredVersion) - if err != nil { - return fmt.Errorf( - "%s: terraform.required_version %q syntax error: %s", - module, - tf.RequiredVersion, err) - } - - if !cs.Check(SemVersion) { - return fmt.Errorf( - "The currently running version of Terraform doesn't meet the\n"+ - "version requirements explicitly specified by the configuration.\n"+ - "Please use the required version or update the configuration.\n"+ - "Note that version requirements are usually set for a reason, so\n"+ - "we recommend verifying with whoever set the version requirements\n"+ - "prior to making any manual changes.\n\n"+ - " Module: %s\n"+ - " Required version: %s\n"+ - " Current version: %s", - module, - tf.RequiredVersion, - SemVersion) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go deleted file mode 100644 index cbd78dd..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "fmt" - -const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" - -var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE deleted file mode 100644 index f0e5c79..0000000 --- a/vendor/github.com/hashicorp/yamux/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. 
"Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. 
You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md deleted file mode 100644 index d4db7fc..0000000 --- a/vendor/github.com/hashicorp/yamux/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Yamux - -Yamux (Yet another Multiplexer) is a multiplexing library for Golang. -It relies on an underlying connection to provide reliability -and ordering, such as TCP or Unix domain sockets, and provides -stream-oriented multiplexing. It is inspired by SPDY but is not -interoperable with it. - -Yamux features include: - -* Bi-directional streams - * Streams can be opened by either client or server - * Useful for NAT traversal - * Server-side push support -* Flow control - * Avoid starvation - * Back-pressure to prevent overwhelming a receiver -* Keep Alives - * Enables persistent connections over a load balancer -* Efficient - * Enables thousands of logical streams with low overhead - -## Documentation - -For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). - -## Specification - -The full specification for Yamux is provided in the `spec.md` file. -It can be used as a guide to implementors of interoperable libraries. - -## Usage - -Using Yamux is remarkably simple: - -```go - -func client() { - // Get a TCP connection - conn, err := net.Dial(...) - if err != nil { - panic(err) - } - - // Setup client side of yamux - session, err := yamux.Client(conn, nil) - if err != nil { - panic(err) - } - - // Open a new stream - stream, err := session.Open() - if err != nil { - panic(err) - } - - // Stream implements net.Conn - stream.Write([]byte("ping")) -} - -func server() { - // Accept a TCP connection - conn, err := listener.Accept() - if err != nil { - panic(err) - } - - // Setup server side of yamux - session, err := yamux.Server(conn, nil) - if err != nil { - panic(err) - } - - // Accept a stream - stream, err := session.Accept() - if err != nil { - panic(err) - } - - // Listen for a message - buf := make([]byte, 4) - stream.Read(buf) -} - -``` - diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go deleted file mode 100644 index be6ebca..0000000 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ /dev/null @@ -1,60 +0,0 @@ -package yamux - -import ( - "fmt" - "net" -) - -// hasAddr is used to get the address from the underlying connection -type hasAddr interface { - LocalAddr() net.Addr - RemoteAddr() net.Addr -} - -// yamuxAddr is used when we cannot get the underlying address -type yamuxAddr struct { - Addr string -} - -func (*yamuxAddr) Network() string { - return "yamux" -} - -func (y *yamuxAddr) String() string { - return fmt.Sprintf("yamux:%s", y.Addr) -} - -// Addr is used to get the address of the listener. -func (s *Session) Addr() net.Addr { - return s.LocalAddr() -} - -// LocalAddr is used to get the local address of the -// underlying connection. 
-func (s *Session) LocalAddr() net.Addr {
-	addr, ok := s.conn.(hasAddr)
-	if !ok {
-		return &yamuxAddr{"local"}
-	}
-	return addr.LocalAddr()
-}
-
-// RemoteAddr is used to get the address of the remote end
-// of the underlying connection
-func (s *Session) RemoteAddr() net.Addr {
-	addr, ok := s.conn.(hasAddr)
-	if !ok {
-		return &yamuxAddr{"remote"}
-	}
-	return addr.RemoteAddr()
-}
-
-// LocalAddr returns the local address
-func (s *Stream) LocalAddr() net.Addr {
-	return s.session.LocalAddr()
-}
-
-// RemoteAddr returns the remote address
-func (s *Stream) RemoteAddr() net.Addr {
-	return s.session.RemoteAddr()
-}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
deleted file mode 100644
index 4f52938..0000000
--- a/vendor/github.com/hashicorp/yamux/const.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package yamux
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-var (
-	// ErrInvalidVersion means we received a frame with an
-	// invalid version
-	ErrInvalidVersion = fmt.Errorf("invalid protocol version")
-
-	// ErrInvalidMsgType means we received a frame with an
-	// invalid message type
-	ErrInvalidMsgType = fmt.Errorf("invalid msg type")
-
-	// ErrSessionShutdown is used if there is a shutdown during
-	// an operation
-	ErrSessionShutdown = fmt.Errorf("session shutdown")
-
-	// ErrStreamsExhausted is returned if we have no more
-	// stream ids to issue
-	ErrStreamsExhausted = fmt.Errorf("streams exhausted")
-
-	// ErrDuplicateStream is used if a duplicate stream is
-	// opened inbound
-	ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
-
-	// ErrRecvWindowExceeded indicates the window was exceeded
-	ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
-
-	// ErrTimeout is used when we reach an IO deadline
-	ErrTimeout = fmt.Errorf("i/o deadline reached")
-
-	// ErrStreamClosed is returned when using a closed stream
-	ErrStreamClosed = fmt.Errorf("stream closed")
-
-	// ErrUnexpectedFlag is set when we get an unexpected flag
-	ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
-
-	// ErrRemoteGoAway is used when we get a go away from the other side
-	ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
-
-	// ErrConnectionReset is sent if a stream is reset. This can happen
-	// if the backlog is exceeded, or if there was a remote GoAway.
-	ErrConnectionReset = fmt.Errorf("connection reset")
-
-	// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
-	// timeout writing to the underlying stream connection.
-	ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
-
-	// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
-	ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
-)
-
-const (
-	// protoVersion is the only version we support
-	protoVersion uint8 = 0
-)
-
-const (
-	// Data is used for data frames. They are followed
-	// by length bytes worth of payload.
-	typeData uint8 = iota
-
-	// WindowUpdate is used to change the window of
-	// a given stream. The length indicates the delta
-	// update to the window.
-	typeWindowUpdate
-
-	// Ping is sent as a keep-alive or to measure
-	// the RTT. The StreamID and Length value are echoed
-	// back in the response.
-	typePing
-
-	// GoAway is sent to terminate a session. The StreamID
-	// should be 0 and the length is an error code.
-	typeGoAway
-)
-
-const (
-	// SYN is sent to signal a new stream. May
-	// be sent with a data payload
-	flagSYN uint16 = 1 << iota
-
-	// ACK is sent to acknowledge a new stream. May
-	// be sent with a data payload
-	flagACK
-
-	// FIN is sent to half-close the given stream.
-	// May be sent with a data payload.
-	flagFIN
-
-	// RST is used to hard close a given stream.
-	flagRST
-)
-
-const (
-	// initialStreamWindow is the initial stream window size
-	initialStreamWindow uint32 = 256 * 1024
-)
-
-const (
-	// goAwayNormal is sent on a normal termination
-	goAwayNormal uint32 = iota
-
-	// goAwayProtoErr sent on a protocol error
-	goAwayProtoErr
-
-	// goAwayInternalErr sent on an internal error
-	goAwayInternalErr
-)
-
-const (
-	sizeOfVersion  = 1
-	sizeOfType     = 1
-	sizeOfFlags    = 2
-	sizeOfStreamID = 4
-	sizeOfLength   = 4
-	headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
-		sizeOfStreamID + sizeOfLength
-)
-
-type header []byte
-
-func (h header) Version() uint8 {
-	return h[0]
-}
-
-func (h header) MsgType() uint8 {
-	return h[1]
-}
-
-func (h header) Flags() uint16 {
-	return binary.BigEndian.Uint16(h[2:4])
-}
-
-func (h header) StreamID() uint32 {
-	return binary.BigEndian.Uint32(h[4:8])
-}
-
-func (h header) Length() uint32 {
-	return binary.BigEndian.Uint32(h[8:12])
-}
-
-func (h header) String() string {
-	return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
-		h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
-}
-
-func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
-	h[0] = protoVersion
-	h[1] = msgType
-	binary.BigEndian.PutUint16(h[2:4], flags)
-	binary.BigEndian.PutUint32(h[4:8], streamID)
-	binary.BigEndian.PutUint32(h[8:12], length)
-}
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
deleted file mode 100644
index 7abc7c7..0000000
--- a/vendor/github.com/hashicorp/yamux/mux.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package yamux
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"time"
-)
-
-// Config is used to tune the Yamux session
-type Config struct {
-	// AcceptBacklog is used to limit how many streams may be
-	// waiting for an accept.
-	AcceptBacklog int
-
-	// EnableKeepAlive is used to send periodic keep-alive
-	// messages using a ping.
-	EnableKeepAlive bool
-
-	// KeepAliveInterval is how often to perform the keep alive
-	KeepAliveInterval time.Duration
-
-	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
-	// which we will suspect a problem with the underlying connection and
-	// close it. This is only applied to writes, where there's generally
-	// an expectation that things will move along quickly.
-	ConnectionWriteTimeout time.Duration
-
-	// MaxStreamWindowSize is used to control the maximum
-	// window size that we allow for a stream.
-	MaxStreamWindowSize uint32
-
-	// LogOutput is used to control the log destination
-	LogOutput io.Writer
-}
-
-// DefaultConfig is used to return a default configuration
-func DefaultConfig() *Config {
-	return &Config{
-		AcceptBacklog:          256,
-		EnableKeepAlive:        true,
-		KeepAliveInterval:      30 * time.Second,
-		ConnectionWriteTimeout: 10 * time.Second,
-		MaxStreamWindowSize:    initialStreamWindow,
-		LogOutput:              os.Stderr,
-	}
-}
-
-// VerifyConfig is used to verify the sanity of configuration
-func VerifyConfig(config *Config) error {
-	if config.AcceptBacklog <= 0 {
-		return fmt.Errorf("backlog must be positive")
-	}
-	if config.KeepAliveInterval == 0 {
-		return fmt.Errorf("keep-alive interval must be positive")
-	}
-	if config.MaxStreamWindowSize < initialStreamWindow {
-		return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
-	}
-	return nil
-}
-
-// Server is used to initialize a new server-side connection.
-// There must be at most one server-side connection. If a nil config is
-// provided, DefaultConfig will be used.
-func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
-	if config == nil {
-		config = DefaultConfig()
-	}
-	if err := VerifyConfig(config); err != nil {
-		return nil, err
-	}
-	return newSession(config, conn, false), nil
-}
-
-// Client is used to initialize a new client-side connection.
-// There must be at most one client-side connection.
-func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
-	if config == nil {
-		config = DefaultConfig()
-	}
-
-	if err := VerifyConfig(config); err != nil {
-		return nil, err
-	}
-	return newSession(config, conn, true), nil
-}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
deleted file mode 100644
index 20369bd..0000000
--- a/vendor/github.com/hashicorp/yamux/session.go
+++ /dev/null
@@ -1,598 +0,0 @@
-package yamux
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"math"
-	"net"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// Session is used to wrap a reliable ordered connection and to
-// multiplex it into multiple streams.
-type Session struct {
-	// remoteGoAway indicates the remote side does
-	// not want further connections. Must be first for alignment.
-	remoteGoAway int32
-
-	// localGoAway indicates that we should stop
-	// accepting further connections. Must be first for alignment.
-	localGoAway int32
-
-	// nextStreamID is the next stream we should
-	// send. This depends on whether we are a client or server.
-	nextStreamID uint32
-
-	// config holds our configuration
-	config *Config
-
-	// logger is used for our logs
-	logger *log.Logger
-
-	// conn is the underlying connection
-	conn io.ReadWriteCloser
-
-	// bufRead is a buffered reader
-	bufRead *bufio.Reader
-
-	// pings is used to track inflight pings
-	pings    map[uint32]chan struct{}
-	pingID   uint32
-	pingLock sync.Mutex
-
-	// streams maps a stream id to a stream
-	streams    map[uint32]*Stream
-	streamLock sync.Mutex
-
-	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
-	// is assumed to be symmetric between the client and server. This allows
-	// the client to avoid exceeding the backlog and instead blocks the open.
-	synCh chan struct{}
-
-	// acceptCh is used to pass ready streams to the client
-	acceptCh chan *Stream
-
-	// sendCh is used to mark a stream as ready to send,
-	// or to send a header out directly.
-	sendCh chan sendReady
-
-	// recvDoneCh is closed when recv() exits to avoid a race
-	// between stream registration and stream shutdown
-	recvDoneCh chan struct{}
-
-	// shutdown is used to safely close a session
-	shutdown     bool
-	shutdownErr  error
-	shutdownCh   chan struct{}
-	shutdownLock sync.Mutex
-}
-
-// sendReady is used to either mark a stream as ready
-// or to directly send a header
-type sendReady struct {
-	Hdr  []byte
-	Body io.Reader
-	Err  chan error
-}
-
-// newSession is used to construct a new session
-func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
-	s := &Session{
-		config:     config,
-		logger:     log.New(config.LogOutput, "", log.LstdFlags),
-		conn:       conn,
-		bufRead:    bufio.NewReader(conn),
-		pings:      make(map[uint32]chan struct{}),
-		streams:    make(map[uint32]*Stream),
-		synCh:      make(chan struct{}, config.AcceptBacklog),
-		acceptCh:   make(chan *Stream, config.AcceptBacklog),
-		sendCh:     make(chan sendReady, 64),
-		recvDoneCh: make(chan struct{}),
-		shutdownCh: make(chan struct{}),
-	}
-	if client {
-		s.nextStreamID = 1
-	} else {
-		s.nextStreamID = 2
-	}
-	go s.recv()
-	go s.send()
-	if config.EnableKeepAlive {
-		go s.keepalive()
-	}
-	return s
-}
-
-// IsClosed does a safe check to see if we have shutdown
-func (s *Session) IsClosed() bool {
-	select {
-	case <-s.shutdownCh:
-		return true
-	default:
-		return false
-	}
-}
-
-// NumStreams returns the number of currently open streams
-func (s *Session) NumStreams() int {
-	s.streamLock.Lock()
-	num := len(s.streams)
-	s.streamLock.Unlock()
-	return num
-}
-
-// Open is used to create a new stream as a net.Conn
-func (s *Session) Open() (net.Conn, error) {
-	conn, err := s.OpenStream()
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
-}
-
-// OpenStream is used to create a new stream
-func (s *Session) OpenStream() (*Stream, error) {
-	if s.IsClosed() {
-		return nil, ErrSessionShutdown
-	}
-	if atomic.LoadInt32(&s.remoteGoAway) == 1 {
-		return nil, ErrRemoteGoAway
-	}
-
-	// Block if we have too many inflight SYNs
-	select {
-	case s.synCh <- struct{}{}:
-	case <-s.shutdownCh:
-		return nil, ErrSessionShutdown
-	}
-
-GET_ID:
-	// Get an ID, and check for stream exhaustion
-	id := atomic.LoadUint32(&s.nextStreamID)
-	if id >= math.MaxUint32-1 {
-		return nil, ErrStreamsExhausted
-	}
-	if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
-		goto GET_ID
-	}
-
-	// Register the stream
-	stream := newStream(s, id, streamInit)
-	s.streamLock.Lock()
-	s.streams[id] = stream
-	s.streamLock.Unlock()
-
-	// Send the window update to create
-	if err := stream.sendWindowUpdate(); err != nil {
-		return nil, err
-	}
-	return stream, nil
-}
-
-// Accept is used to block until the next available stream
-// is ready to be accepted.
-func (s *Session) Accept() (net.Conn, error) {
-	conn, err := s.AcceptStream()
-	if err != nil {
-		return nil, err
-	}
-	return conn, err
-}
-
-// AcceptStream is used to block until the next available stream
-// is ready to be accepted.
-func (s *Session) AcceptStream() (*Stream, error) {
-	select {
-	case stream := <-s.acceptCh:
-		if err := stream.sendWindowUpdate(); err != nil {
-			return nil, err
-		}
-		return stream, nil
-	case <-s.shutdownCh:
-		return nil, s.shutdownErr
-	}
-}
-
-// Close is used to close the session and all streams.
-// Attempts to send a GoAway before closing the connection.
-func (s *Session) Close() error {
-	s.shutdownLock.Lock()
-	defer s.shutdownLock.Unlock()
-
-	if s.shutdown {
-		return nil
-	}
-	s.shutdown = true
-	if s.shutdownErr == nil {
-		s.shutdownErr = ErrSessionShutdown
-	}
-	close(s.shutdownCh)
-	s.conn.Close()
-	<-s.recvDoneCh
-
-	s.streamLock.Lock()
-	defer s.streamLock.Unlock()
-	for _, stream := range s.streams {
-		stream.forceClose()
-	}
-	return nil
-}
-
-// exitErr is used to handle an error that is causing the
-// session to terminate.
-func (s *Session) exitErr(err error) {
-	s.shutdownLock.Lock()
-	if s.shutdownErr == nil {
-		s.shutdownErr = err
-	}
-	s.shutdownLock.Unlock()
-	s.Close()
-}
-
-// GoAway can be used to prevent accepting further
-// connections. It does not close the underlying conn.
-func (s *Session) GoAway() error {
-	return s.waitForSend(s.goAway(goAwayNormal), nil)
-}
-
-// goAway is used to send a goAway message
-func (s *Session) goAway(reason uint32) header {
-	atomic.SwapInt32(&s.localGoAway, 1)
-	hdr := header(make([]byte, headerSize))
-	hdr.encode(typeGoAway, 0, 0, reason)
-	return hdr
-}
-
-// Ping is used to measure the RTT response time
-func (s *Session) Ping() (time.Duration, error) {
-	// Get a channel for the ping
-	ch := make(chan struct{})
-
-	// Get a new ping id, mark as pending
-	s.pingLock.Lock()
-	id := s.pingID
-	s.pingID++
-	s.pings[id] = ch
-	s.pingLock.Unlock()
-
-	// Send the ping request
-	hdr := header(make([]byte, headerSize))
-	hdr.encode(typePing, flagSYN, 0, id)
-	if err := s.waitForSend(hdr, nil); err != nil {
-		return 0, err
-	}
-
-	// Wait for a response
-	start := time.Now()
-	select {
-	case <-ch:
-	case <-time.After(s.config.ConnectionWriteTimeout):
-		s.pingLock.Lock()
-		delete(s.pings, id) // Ignore it if a response comes later.
-		s.pingLock.Unlock()
-		return 0, ErrTimeout
-	case <-s.shutdownCh:
-		return 0, ErrSessionShutdown
-	}
-
-	// Compute the RTT
-	return time.Now().Sub(start), nil
-}
-
-// keepalive is a long running goroutine that periodically does
-// a ping to keep the connection alive.
-func (s *Session) keepalive() {
-	for {
-		select {
-		case <-time.After(s.config.KeepAliveInterval):
-			_, err := s.Ping()
-			if err != nil {
-				s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
-				s.exitErr(ErrKeepAliveTimeout)
-				return
-			}
-		case <-s.shutdownCh:
-			return
-		}
-	}
-}
-
-// waitForSend waits to send a header, checking for a potential shutdown
-func (s *Session) waitForSend(hdr header, body io.Reader) error {
-	errCh := make(chan error, 1)
-	return s.waitForSendErr(hdr, body, errCh)
-}
-
-// waitForSendErr waits to send a header with optional data, checking for a
-// potential shutdown. Since there's the expectation that sends can happen
-// in a timely manner, we enforce the connection write timeout here.
-func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
-
-	ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
-	select {
-	case s.sendCh <- ready:
-	case <-s.shutdownCh:
-		return ErrSessionShutdown
-	case <-timer.C:
-		return ErrConnectionWriteTimeout
-	}
-
-	select {
-	case err := <-errCh:
-		return err
-	case <-s.shutdownCh:
-		return ErrSessionShutdown
-	case <-timer.C:
-		return ErrConnectionWriteTimeout
-	}
-}
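`Ping` is the primitive behind the keepalive loop above, and callers can also use it directly to sample round-trip time. A minimal sketch, assuming an already-established session (the wrapper function name is made up):

```go
package example

import (
	"log"

	"github.com/hashicorp/yamux"
)

// measureRTT samples one round trip over an established yamux session.
// Ping returns ErrTimeout if no reply arrives within the configured
// ConnectionWriteTimeout, or ErrSessionShutdown if the session is closed.
func measureRTT(session *yamux.Session) {
	rtt, err := session.Ping()
	if err != nil {
		log.Printf("ping failed: %v", err)
		return
	}
	log.Printf("round trip took %s", rtt)
}
```
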
-// sendNoWait does a send without waiting. Since there's the expectation that
-// the send happens right here, we enforce the connection write timeout if we
-// can't queue the header to be sent.
-func (s *Session) sendNoWait(hdr header) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
-
-	select {
-	case s.sendCh <- sendReady{Hdr: hdr}:
-		return nil
-	case <-s.shutdownCh:
-		return ErrSessionShutdown
-	case <-timer.C:
-		return ErrConnectionWriteTimeout
-	}
-}
-
-// send is a long running goroutine that sends data
-func (s *Session) send() {
-	for {
-		select {
-		case ready := <-s.sendCh:
-			// Send a header if ready
-			if ready.Hdr != nil {
-				sent := 0
-				for sent < len(ready.Hdr) {
-					n, err := s.conn.Write(ready.Hdr[sent:])
-					if err != nil {
-						s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
-						asyncSendErr(ready.Err, err)
-						s.exitErr(err)
-						return
-					}
-					sent += n
-				}
-			}
-
-			// Send data from a body if given
-			if ready.Body != nil {
-				_, err := io.Copy(s.conn, ready.Body)
-				if err != nil {
-					s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
-					asyncSendErr(ready.Err, err)
-					s.exitErr(err)
-					return
-				}
-			}
-
-			// No error, successful send
-			asyncSendErr(ready.Err, nil)
-		case <-s.shutdownCh:
-			return
-		}
-	}
-}
-
-// recv is a long running goroutine that accepts new data
-func (s *Session) recv() {
-	if err := s.recvLoop(); err != nil {
-		s.exitErr(err)
-	}
-}
-
-// recvLoop continues to receive data until a fatal error is encountered
-func (s *Session) recvLoop() error {
-	defer close(s.recvDoneCh)
-	hdr := header(make([]byte, headerSize))
-	var handler func(header) error
-	for {
-		// Read the header
-		if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
-			if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
-				s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
-			}
-			return err
-		}
-
-		// Verify the version
-		if hdr.Version() != protoVersion {
-			s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
-			return ErrInvalidVersion
-		}
-
-		// Switch on the type
-		switch hdr.MsgType() {
-		case typeData:
-			handler = s.handleStreamMessage
-		case typeWindowUpdate:
-			handler = s.handleStreamMessage
-		case typeGoAway:
-			handler = s.handleGoAway
-		case typePing:
-			handler = s.handlePing
-		default:
-			return ErrInvalidMsgType
-		}
-
-		// Invoke the handler
-		if err := handler(hdr); err != nil {
-			return err
-		}
-	}
-}
-
-// handleStreamMessage handles either a data or window update frame
-func (s *Session) handleStreamMessage(hdr header) error {
-	// Check for a new stream creation
-	id := hdr.StreamID()
-	flags := hdr.Flags()
-	if flags&flagSYN == flagSYN {
-		if err := s.incomingStream(id); err != nil {
-			return err
-		}
-	}
-
-	// Get the stream
-	s.streamLock.Lock()
-	stream := s.streams[id]
-	s.streamLock.Unlock()
-
-	// If we do not have a stream, likely we sent a RST
-	if stream == nil {
-		// Drain any data on the wire
-		if hdr.MsgType() == typeData && hdr.Length() > 0 {
-			s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
-			if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
-				s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
-				return nil
-			}
-		} else {
-			s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
-		}
-		return nil
-	}
-
-	// Check if this is a window update
-	if hdr.MsgType() == typeWindowUpdate {
-		if err := stream.incrSendWindow(hdr, flags); err != nil {
-			if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
-				s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
-			}
-			return err
-		}
-		return nil
-	}
-
-	// Read the new data
-	if err := stream.readData(hdr, flags, s.bufRead); err != nil {
-		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
-			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
-		}
-		return err
-	}
-	return nil
-}
-
-// handlePing is invoked for a typePing frame
-func (s *Session) handlePing(hdr header) error {
-	flags := hdr.Flags()
-	pingID := hdr.Length()
-
-	// Check if this is a query, respond back in a separate context so we
-	// don't interfere with the receiving thread blocking for the write.
-	if flags&flagSYN == flagSYN {
-		go func() {
-			hdr := header(make([]byte, headerSize))
-			hdr.encode(typePing, flagACK, 0, pingID)
-			if err := s.sendNoWait(hdr); err != nil {
-				s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
-			}
-		}()
-		return nil
-	}
-
-	// Handle a response
-	s.pingLock.Lock()
-	ch := s.pings[pingID]
-	if ch != nil {
-		delete(s.pings, pingID)
-		close(ch)
-	}
-	s.pingLock.Unlock()
-	return nil
-}
-
-// handleGoAway is invoked for a typeGoAway frame
-func (s *Session) handleGoAway(hdr header) error {
-	code := hdr.Length()
-	switch code {
-	case goAwayNormal:
-		atomic.SwapInt32(&s.remoteGoAway, 1)
-	case goAwayProtoErr:
-		s.logger.Printf("[ERR] yamux: received protocol error go away")
-		return fmt.Errorf("yamux protocol error")
-	case goAwayInternalErr:
-		s.logger.Printf("[ERR] yamux: received internal error go away")
-		return fmt.Errorf("remote yamux internal error")
-	default:
-		s.logger.Printf("[ERR] yamux: received unexpected go away")
-		return fmt.Errorf("unexpected go away received")
-	}
-	return nil
-}
-
-// incomingStream is used to create a new incoming stream
-func (s *Session) incomingStream(id uint32) error {
-	// Reject immediately if we are doing a go away
-	if atomic.LoadInt32(&s.localGoAway) == 1 {
-		hdr := header(make([]byte, headerSize))
-		hdr.encode(typeWindowUpdate, flagRST, id, 0)
-		return s.sendNoWait(hdr)
-	}
-
-	// Allocate a new stream
-	stream := newStream(s, id, streamSYNReceived)
-
-	s.streamLock.Lock()
-	defer s.streamLock.Unlock()
-
-	// Check if stream already exists
-	if _, ok := s.streams[id]; ok {
-		s.logger.Printf("[ERR] yamux: duplicate stream declared")
-		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
-			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
-		}
-		return ErrDuplicateStream
-	}
-
-	// Register the stream
-	s.streams[id] = stream
-
-	// Check if we've exceeded the backlog
-	select {
-	case s.acceptCh <- stream:
-		return nil
-	default:
-		// Backlog exceeded! RST the stream
-		s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
-		delete(s.streams, id)
-		stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
-		return s.sendNoWait(stream.sendHdr)
-	}
-}
-
-// closeStream is used to close a stream once both sides have
-// issued a close.
-func (s *Session) closeStream(id uint32) {
-	s.streamLock.Lock()
-	delete(s.streams, id)
-	s.streamLock.Unlock()
-}
-
-// establishStream is used to mark a stream that was in the
-// SYN Sent state as established.
-func (s *Session) establishStream() {
-	select {
-	case <-s.synCh:
-	default:
-		panic("established stream without inflight syn")
-	}
-}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
deleted file mode 100644
index 419470b..0000000
--- a/vendor/github.com/hashicorp/yamux/spec.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Specification
-
-We use this document to detail the internal specification of Yamux.
-This is used both as a guide for implementing Yamux and as a
-reference for building alternative interoperable libraries.
-
-# Framing
-
-Yamux uses a streaming connection underneath, but imposes a message
-framing so that it can be shared between many logical streams. Each
-frame contains a header like:
-
-* Version (8 bits)
-* Type (8 bits)
-* Flags (16 bits)
-* StreamID (32 bits)
-* Length (32 bits)
-
-This means that each header has a 12 byte overhead.
-All fields are encoded in network order (big endian).
-Each field is described below:
-
-## Version Field
-
-The version field is used for future backwards compatibility. At the
-current time, the field is always set to 0, to indicate the initial
-version.
-
-## Type Field
-
-The type field is used to switch the frame message type. The following
-message types are supported:
-
-* 0x0 Data - Used to transmit data. May transmit zero length payloads
-  depending on the flags.
-
-* 0x1 Window Update - Used to update the sender's receive window size.
-  This is used to implement per-session flow control.
-
-* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
-  and do keep-alives over TCP.
-
-* 0x3 Go Away - Used to close a session.
-
-## Flag Field
-
-The flags field is used to provide additional information related
-to the message type. The following flags are supported:
-
-* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
-  window update message. Also sent with a ping to indicate outbound.
-
-* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
-  or window update message. Also sent with a ping to indicate response.
-
-* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
-  message or window update.
-
-* 0x8 RST - Reset a stream immediately. May be sent with a data or
-  window update message.
-
-## StreamID Field
-
-The StreamID field is used to identify the logical stream the frame
-is addressing. The client side should use odd IDs, and the server even.
-This prevents any collisions. Additionally, the 0 ID is reserved to represent
-the session.
-
-Both Ping and Go Away messages should always use the 0 StreamID.
-
-## Length Field
-
-The meaning of the length field depends on the message type:
-
-* Data - provides the length of bytes following the header
-* Window update - provides a delta update to the window size
-* Ping - Contains an opaque value, echoed back
-* Go Away - Contains an error code
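The field layout above maps byte-for-byte onto the `header` helpers in the deleted `const.go`. A self-contained sketch of encoding one frame header, with made-up flag and stream values:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const headerSize = 12 // 1 version + 1 type + 2 flags + 4 stream ID + 4 length

// encodeHeader packs a yamux-style frame header in network (big endian) order.
func encodeHeader(msgType uint8, flags uint16, streamID, length uint32) []byte {
	hdr := make([]byte, headerSize)
	hdr[0] = 0 // version field, always 0 for the initial protocol version
	hdr[1] = msgType
	binary.BigEndian.PutUint16(hdr[2:4], flags)
	binary.BigEndian.PutUint32(hdr[4:8], streamID)
	binary.BigEndian.PutUint32(hdr[8:12], length)
	return hdr
}

func main() {
	// A Data frame (type 0x0) with the SYN flag (0x1) on stream 1,
	// announcing a 4-byte payload.
	hdr := encodeHeader(0x0, 0x1, 1, 4)
	fmt.Printf("% x\n", hdr) // 00 00 00 01 00 00 00 01 00 00 00 04
}
```
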
-# Message Flow
-
-There is no explicit connection setup, as Yamux relies on an underlying
-transport to be provided. However, there is a distinction between the
-client and server sides of the connection.
-
-## Opening a stream
-
-To open a stream, an initial data or window update frame is sent
-with a new StreamID. The SYN flag should be set to signal a new stream.
-
-The receiver must then reply with either a data or window update frame
-with the StreamID along with the ACK flag to accept the stream or with
-the RST flag to reject the stream.
-
-Because we are relying on the reliable stream underneath, a connection
-can begin sending data once the SYN flag is sent. The corresponding
-ACK does not need to be received. This is particularly well suited
-for an RPC system where a client wants to open a stream and immediately
-fire a request without waiting for the RTT of the ACK.
-
-This does introduce the possibility of a connection being rejected
-after data has been sent already. This is a slight semantic difference
-from TCP, where the connection cannot be refused after it is opened.
-Clients should be prepared to handle this by checking for an error
-that indicates a RST was received.
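A minimal sketch of that client-side error handling, assuming an established session from `yamux.Client` (the wrapper function is made up):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/yamux"
)

// openAndSend opens a stream and writes optimistically, before the peer's
// ACK can arrive, then checks whether the stream was rejected with a RST.
func openAndSend(session *yamux.Session) error {
	stream, err := session.OpenStream()
	if err != nil {
		return err
	}
	defer stream.Close()

	if _, err := stream.Write([]byte("request")); err != nil {
		if err == yamux.ErrConnectionReset {
			// The peer sent a RST after we had already written data.
			return fmt.Errorf("stream rejected by peer: %v", err)
		}
		return err
	}
	return nil
}
```
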
-## Closing a stream
-
-To close a stream, either side sends a data or window update frame
-along with the FIN flag. This does a half-close indicating the sender
-will send no further data.
-
-Once both sides have half-closed the stream, the stream is closed.
-
-Alternatively, if an error occurs, the RST flag can be used to
-hard close a stream immediately.
-
-## Flow Control
-
-Yamux initially starts each stream with a 256KB window size.
-There is no window size for the session.
-
-To prevent the streams from stalling, window update frames should be
-sent regularly. Yamux can be configured to provide a larger limit for
-window sizes. Both sides assume the initial 256KB window, but can
-immediately send a window update as part of the SYN/ACK indicating a
-larger window.
-
-Both sides should track the number of bytes sent in Data frames
-only, as only they are tracked as part of the window size.
-
-## Session termination
-
-When a session is being terminated, the Go Away message should
-be sent. The Length should be set to one of the following to
-provide an error code:
-
-* 0x0 Normal termination
-* 0x1 Protocol error
-* 0x2 Internal error
-
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
deleted file mode 100644
index 4c3242d..0000000
--- a/vendor/github.com/hashicorp/yamux/stream.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package yamux
-
-import (
-	"bytes"
-	"io"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-type streamState int
-
-const (
-	streamInit streamState = iota
-	streamSYNSent
-	streamSYNReceived
-	streamEstablished
-	streamLocalClose
-	streamRemoteClose
-	streamClosed
-	streamReset
-)
-
-// Stream is used to represent a logical stream
-// within a session.
-type Stream struct { - recvWindow uint32 - sendWindow uint32 - - id uint32 - session *Session - - state streamState - stateLock sync.Mutex - - recvBuf *bytes.Buffer - recvLock sync.Mutex - - controlHdr header - controlErr chan error - controlHdrLock sync.Mutex - - sendHdr header - sendErr chan error - sendLock sync.Mutex - - recvNotifyCh chan struct{} - sendNotifyCh chan struct{} - - readDeadline time.Time - writeDeadline time.Time -} - -// newStream is used to construct a new stream within -// a given session for an ID -func newStream(session *Session, id uint32, state streamState) *Stream { - s := &Stream{ - id: id, - session: session, - state: state, - controlHdr: header(make([]byte, headerSize)), - controlErr: make(chan error, 1), - sendHdr: header(make([]byte, headerSize)), - sendErr: make(chan error, 1), - recvWindow: initialStreamWindow, - sendWindow: initialStreamWindow, - recvNotifyCh: make(chan struct{}, 1), - sendNotifyCh: make(chan struct{}, 1), - } - return s -} - -// Session returns the associated stream session -func (s *Stream) Session() *Session { - return s.session -} - -// StreamID returns the ID of this stream -func (s *Stream) StreamID() uint32 { - return s.id -} - -// Read is used to read from the stream -func (s *Stream) Read(b []byte) (n int, err error) { - defer asyncNotify(s.recvNotifyCh) -START: - s.stateLock.Lock() - switch s.state { - case streamLocalClose: - fallthrough - case streamRemoteClose: - fallthrough - case streamClosed: - if s.recvBuf == nil || s.recvBuf.Len() == 0 { - s.stateLock.Unlock() - return 0, io.EOF - } - case streamReset: - s.stateLock.Unlock() - return 0, ErrConnectionReset - } - s.stateLock.Unlock() - - // If there is no data available, block - s.recvLock.Lock() - if s.recvBuf == nil || s.recvBuf.Len() == 0 { - s.recvLock.Unlock() - goto WAIT - } - - // Read any bytes - n, _ = s.recvBuf.Read(b) - s.recvLock.Unlock() - - // Send a window update potentially - err = s.sendWindowUpdate() - return n, err - -WAIT: - var timeout <-chan time.Time - if !s.readDeadline.IsZero() { - delay := s.readDeadline.Sub(time.Now()) - timeout = time.After(delay) - } - select { - case <-s.recvNotifyCh: - goto START - case <-timeout: - return 0, ErrTimeout - } -} - -// Write is used to write to the stream -func (s *Stream) Write(b []byte) (n int, err error) { - s.sendLock.Lock() - defer s.sendLock.Unlock() - total := 0 - for total < len(b) { - n, err := s.write(b[total:]) - total += n - if err != nil { - return total, err - } - } - return total, nil -} - -// write is used to write to the stream, may return on -// a short write. 
-func (s *Stream) write(b []byte) (n int, err error) { - var flags uint16 - var max uint32 - var body io.Reader -START: - s.stateLock.Lock() - switch s.state { - case streamLocalClose: - fallthrough - case streamClosed: - s.stateLock.Unlock() - return 0, ErrStreamClosed - case streamReset: - s.stateLock.Unlock() - return 0, ErrConnectionReset - } - s.stateLock.Unlock() - - // If there is no data available, block - window := atomic.LoadUint32(&s.sendWindow) - if window == 0 { - goto WAIT - } - - // Determine the flags if any - flags = s.sendFlags() - - // Send up to our send window - max = min(window, uint32(len(b))) - body = bytes.NewReader(b[:max]) - - // Send the header - s.sendHdr.encode(typeData, flags, s.id, max) - if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { - return 0, err - } - - // Reduce our send window - atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) - - // Unlock - return int(max), err - -WAIT: - var timeout <-chan time.Time - if !s.writeDeadline.IsZero() { - delay := s.writeDeadline.Sub(time.Now()) - timeout = time.After(delay) - } - select { - case <-s.sendNotifyCh: - goto START - case <-timeout: - return 0, ErrTimeout - } - return 0, nil -} - -// sendFlags determines any flags that are appropriate -// based on the current stream state -func (s *Stream) sendFlags() uint16 { - s.stateLock.Lock() - defer s.stateLock.Unlock() - var flags uint16 - switch s.state { - case streamInit: - flags |= flagSYN - s.state = streamSYNSent - case streamSYNReceived: - flags |= flagACK - s.state = streamEstablished - } - return flags -} - -// sendWindowUpdate potentially sends a window update enabling -// further writes to take place. Must be invoked with the lock. -func (s *Stream) sendWindowUpdate() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - // Determine the delta update - max := s.session.config.MaxStreamWindowSize - delta := max - atomic.LoadUint32(&s.recvWindow) - - // Determine the flags if any - flags := s.sendFlags() - - // Check if we can omit the update - if delta < (max/2) && flags == 0 { - return nil - } - - // Update our window - atomic.AddUint32(&s.recvWindow, delta) - - // Send the header - s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - return err - } - return nil -} - -// sendClose is used to send a FIN -func (s *Stream) sendClose() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - flags := s.sendFlags() - flags |= flagFIN - s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - return err - } - return nil -} - -// Close is used to close the stream -func (s *Stream) Close() error { - closeStream := false - s.stateLock.Lock() - switch s.state { - // Opened means we need to signal a close - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamLocalClose - goto SEND_CLOSE - - case streamLocalClose: - case streamRemoteClose: - s.state = streamClosed - closeStream = true - goto SEND_CLOSE - - case streamClosed: - case streamReset: - default: - panic("unhandled state") - } - s.stateLock.Unlock() - return nil -SEND_CLOSE: - s.stateLock.Unlock() - s.sendClose() - s.notifyWaiting() - if closeStream { - s.session.closeStream(s.id) - } - return nil -} - -// forceClose is used for when the session is exiting -func (s *Stream) forceClose() { - s.stateLock.Lock() - 
s.state = streamClosed - s.stateLock.Unlock() - s.notifyWaiting() -} - -// processFlags is used to update the state of the stream -// based on set flags, if any. Lock must be held -func (s *Stream) processFlags(flags uint16) error { - // Close the stream without holding the state lock - closeStream := false - defer func() { - if closeStream { - s.session.closeStream(s.id) - } - }() - - s.stateLock.Lock() - defer s.stateLock.Unlock() - if flags&flagACK == flagACK { - if s.state == streamSYNSent { - s.state = streamEstablished - } - s.session.establishStream() - } - if flags&flagFIN == flagFIN { - switch s.state { - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamRemoteClose - s.notifyWaiting() - case streamLocalClose: - s.state = streamClosed - closeStream = true - s.notifyWaiting() - default: - s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) - return ErrUnexpectedFlag - } - } - if flags&flagRST == flagRST { - if s.state == streamSYNSent { - s.session.establishStream() - } - s.state = streamReset - closeStream = true - s.notifyWaiting() - } - return nil -} - -// notifyWaiting notifies all the waiting channels -func (s *Stream) notifyWaiting() { - asyncNotify(s.recvNotifyCh) - asyncNotify(s.sendNotifyCh) -} - -// incrSendWindow updates the size of our send window -func (s *Stream) incrSendWindow(hdr header, flags uint16) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Increase window, unblock a sender - atomic.AddUint32(&s.sendWindow, hdr.Length()) - asyncNotify(s.sendNotifyCh) - return nil -} - -// readData is used to handle a data frame -func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Check that our recv window is not exceeded - length := hdr.Length() - if length == 0 { - return nil - } - if remain := atomic.LoadUint32(&s.recvWindow); length > remain { - s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length) - return ErrRecvWindowExceeded - } - - // Wrap in a limited reader - conn = &io.LimitedReader{R: conn, N: int64(length)} - - // Copy into buffer - s.recvLock.Lock() - if s.recvBuf == nil { - // Allocate the receive buffer just-in-time to fit the full data frame. - // This way we can read in the whole packet without further allocations. - s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) - } - if _, err := io.Copy(s.recvBuf, conn); err != nil { - s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) - s.recvLock.Unlock() - return err - } - - // Decrement the receive window - atomic.AddUint32(&s.recvWindow, ^uint32(length-1)) - s.recvLock.Unlock() - - // Unblock any readers - asyncNotify(s.recvNotifyCh) - return nil -} - -// SetDeadline sets the read and write deadlines -func (s *Stream) SetDeadline(t time.Time) error { - if err := s.SetReadDeadline(t); err != nil { - return err - } - if err := s.SetWriteDeadline(t); err != nil { - return err - } - return nil -} - -// SetReadDeadline sets the deadline for future Read calls. 
-func (s *Stream) SetReadDeadline(t time.Time) error {
-	s.readDeadline = t
-	return nil
-}
-
-// SetWriteDeadline sets the deadline for future Write calls
-func (s *Stream) SetWriteDeadline(t time.Time) error {
-	s.writeDeadline = t
-	return nil
-}
-
-// Shrink is used to compact the amount of buffers utilized.
-// This is useful when using Yamux in a connection pool to reduce
-// the idle memory utilization.
-func (s *Stream) Shrink() {
-	s.recvLock.Lock()
-	if s.recvBuf != nil && s.recvBuf.Len() == 0 {
-		s.recvBuf = nil
-	}
-	s.recvLock.Unlock()
-}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
deleted file mode 100644
index 5fe45af..0000000
--- a/vendor/github.com/hashicorp/yamux/util.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package yamux
-
-// asyncSendErr is used to try an async send of an error
-func asyncSendErr(ch chan error, err error) {
-	if ch == nil {
-		return
-	}
-	select {
-	case ch <- err:
-	default:
-	}
-}
-
-// asyncNotify is used to signal a waiting goroutine
-func asyncNotify(ch chan struct{}) {
-	select {
-	case ch <- struct{}{}:
-	default:
-	}
-}
-
-// min computes the minimum of two values
-func min(a, b uint32) uint32 {
-	if a < b {
-		return a
-	}
-	return b
-}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
deleted file mode 100644
index b03310a..0000000
--- a/vendor/github.com/jmespath/go-jmespath/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2015 James Saryerwinnie
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
deleted file mode 100644
index a828d28..0000000
--- a/vendor/github.com/jmespath/go-jmespath/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-
-CMD = jpgo
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  test      to run all the tests"
-	@echo "  build     to build the library and jp executable"
-	@echo "  generate  to run codegen"
-
-
-generate:
-	go generate ./...
-
-build:
-	rm -f $(CMD)
-	go build ./...
-	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
-	mv cmd/$(CMD)/$(CMD) .
-
-test:
-	go test -v ./...
-
-check:
-	go vet ./...
-	@echo "golint ./..."
-	@lint=`golint ./...`; \
-	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
-	echo "$$lint"; \
-	if [ "$$lint" != "" ]; then exit 1; fi
-
-htmlc:
-	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
-
-buildfuzz:
-	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
-
-fuzz: buildfuzz
-	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
-
-bench:
-	go test -bench . -cpuprofile cpu.out
-
-pprof-cpu:
-	go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
deleted file mode 100644
index 187ef67..0000000
--- a/vendor/github.com/jmespath/go-jmespath/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# go-jmespath - A JMESPath implementation in Go
-
-[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
-
-
-
-See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
deleted file mode 100644
index 9cfa988..0000000
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package jmespath
-
-import "strconv"
-
-// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
-// safe for concurrent use by multiple goroutines.
-type JMESPath struct {
-	ast  ASTNode
-	intr *treeInterpreter
-}
-
-// Compile parses a JMESPath expression and returns, if successful, a JMESPath
-// object that can be used to match against data.
-func Compile(expression string) (*JMESPath, error) {
-	parser := NewParser()
-	ast, err := parser.Parse(expression)
-	if err != nil {
-		return nil, err
-	}
-	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
-	return jmespath, nil
-}
-
-// MustCompile is like Compile but panics if the expression cannot be parsed.
-// It simplifies safe initialization of global variables holding compiled
-// JMESPaths.
-func MustCompile(expression string) *JMESPath {
-	jmespath, err := Compile(expression)
-	if err != nil {
-		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
-	}
-	return jmespath
-}
-
-// Search evaluates a JMESPath expression against input data and returns the result.
-func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
-	return jp.intr.Execute(jp.ast, data)
-}
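A short usage sketch of the API above (the expression and input document are made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	if err := json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data); err != nil {
		panic(err)
	}

	// Compile once and reuse; a compiled JMESPath is safe for concurrent use.
	jp := jmespath.MustCompile("foo.bar")
	result, err := jp.Search(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // baz
}
```

-// Search evaluates a JMESPath expression against input data and returns the result.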
-func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d23..0000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89..0000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. 
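// Note: sort.Stable still needs an ordering even on failure, so Less
// reports a dummy result here; jpfSortBy checks hasError after sorting
// and surfaces a single comparison error for the whole sort.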
- return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "map", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { -
name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) - } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. 
- args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
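// Strings compare lexicographically; e.g. (illustrative) the max of
// ["a", "x", "b"] is "x".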
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string.
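// e.g. (illustrative) join("-", ["a", "b", "c"]) yields "a-b-c".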
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c7460..0000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
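// A minimal sketch of driving the interpreter directly (normally this is
// reached via jmespath.Search; the expression and input are illustrative):
//
//	ast, _ := NewParser().Parse("foo[0]")
//	out, _ := newInterpreter().Execute(ast, map[string]interface{}{
//		"foo": []interface{}{"first", "second"},
//	})
//	// out == "first"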
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
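// This path covers typed slices supplied by callers, e.g. a []string
// (illustrative), which does not type-assert to []interface{}.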
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, 0, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected,
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c..0000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token.
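// e.g. '.' lexes to tDot and '@' to tCurrent (see basicTokens above).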
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
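// e.g. (illustrative) an expression like 'abc that never closes its
// quote lands here and is reported as an unclosed delimiter.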
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can be reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tOr, "|" -> tPipe. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There are three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare lbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number.
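// Only the ASCII digits 0-9 are consumed here; a leading '-' was already
// accepted by tokenize before it dispatched to consumeNumber.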
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 1240a17..0000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
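// For example (illustrative):
//
//	ast, err := NewParser().Parse("locations[?state == 'WA'].name")
//	// a malformed expression yields a SyntaxError; otherwise ast is the
//	// root node of the parsed expression.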
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } -
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, err - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cb..0000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d..0000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string, array, or hash. -// - The boolean value false.
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/lsegal/gucumber/LICENSE.txt b/vendor/github.com/lsegal/gucumber/LICENSE.txt deleted file mode 100755 index 342cce5..0000000 --- a/vendor/github.com/lsegal/gucumber/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015 Loren Segal - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lsegal/gucumber/Makefile b/vendor/github.com/lsegal/gucumber/Makefile deleted file mode 100644 index a09b5ed..0000000 --- a/vendor/github.com/lsegal/gucumber/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -deps: - go get ./... - -build: deps - go install ./cmd/gucumber - -test: build - go test ./... - gucumber diff --git a/vendor/github.com/lsegal/gucumber/README.md b/vendor/github.com/lsegal/gucumber/README.md deleted file mode 100755 index 0fbe81c..0000000 --- a/vendor/github.com/lsegal/gucumber/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# Gucumber - -[![GoDoc](http://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/gucumber/gucumber) -[![Build Status](https://img.shields.io/travis/gucumber/gucumber.svg)](https://travis-ci.org/gucumber/gucumber) -[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/gucumber/gucumber/blob/master/LICENSE.txt) - -An implementation of [Cucumber][cuke] BDD-style testing for Go. - -# Installing - -```sh -$ go get github.com/gucumber/gucumber/cmd/gucumber -``` - -# Usage - -Cucumber tests are made up of plain text ".feature" files and program source -"step definitions", which for Gucumber are written in Go. - -## Features - -Put [feature files][features] `internal/features/` with whatever organization you -prefer. 
For example, you might create `internal/features/accounts/login.feature` -with the following text: - -``` -@login -Feature: Login Support - - Scenario: User successfully logs in - Given I have user/pass "foo" / "bar" - And they log into the website with user "foo" and password "bar" - Then the user should be successfully logged in -``` - -## Step Definitions - -Create step definitions to match each step in your feature files. These go -in ".go" files in the same `internal/features/` directory. We might create -`internal/features/accounts/step_definitions.go`: - -```go -package accounts - -import ( - . "github.com/gucumber/gucumber" -) - -func init() { - user, pass := "", "" - - Before("@login", func() { - // runs before every feature or scenario tagged with @login - generatePassword() - }) - - Given(`^I have user/pass "(.+?)" / "(.+?)"$`, func(u, p string) { - user, pass = u, p - }) - - // ... - - Then(`^the user should be successfully logged in$`, func() { - if !userIsLoggedIn() { - T.Fail("user should have been logged in") - } - }) -} -``` - -### T? - -The `T` value is a [testing.T](http://golang.org/pkg/testing/#T) style -value that represents the test context for each test. It mostly supports -`Errorf(fmt, args...)`, but also supports other convenience methods. See - the [API documentation](http://godoc.org/github.com/gucumber/gucumber#TestingT) - for more information. - -## Running - -To run your tests, execute: - -```sh -$ gucumber -``` - -You can also specify the path to features in command line arguments: - -```sh -$ gucumber path/to/features -``` - -You can also filter features and scenarios by tags: - -```sh -$ gucumber -tags=@login # only run login feature(s) -``` - -Or: - -```sh -$ gucumber -tags=~@slow # ignore all "slow" scenarios -``` - -# Copyright - -This library was written by [Loren Segal][lsegal] in 2015. It is licensed for -use under the [MIT license][mit]. - -[cuke]: http://cukes.info -[features]: https://github.com/cucumber/cucumber/wiki/Feature-Introduction -[lsegal]: http://gnuu.org -[mit]: http://opensource.org/licenses/MIT diff --git a/vendor/github.com/lsegal/gucumber/builder.go b/vendor/github.com/lsegal/gucumber/builder.go deleted file mode 100644 index c458bc9..0000000 --- a/vendor/github.com/lsegal/gucumber/builder.go +++ /dev/null @@ -1,160 +0,0 @@ -package gucumber - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" -) - -const ( - importMarkerFile = "importmarker__.go" - testFile = "gucumbertest__.go" -) - -// BuildAndRunDir builds the given director's features into Go Code. -// Using the filters provided. An error is returned if the build fails. -func BuildAndRunDir(dir string, filters []string) error { - return buildAndRunDir(dir, filters, "") -} - -// BuildAndRunDirWithGoBuildTags builds the given director's features into Go -// Code using the filters provided. Also takes a string for the build tags to -// be passed to the go command. An error is returned if the build fails. -// -// If goBuildTags is empty, the param will be ignored. 
-func BuildAndRunDirWithGoBuildTags(dir string, filters []string, goBuildTags string) error { - return buildAndRunDir(dir, filters, goBuildTags) -} - -func buildAndRunDir(dir string, filters []string, goBuildTags string) error { - defer buildCleanup(dir) - - info := buildInfo{ - Imports: []string{}, - FeaturesPath: fmt.Sprintf("%q", dir), - Filters: filters, - } - - goFiles, _ := filepath.Glob(filepath.Join(dir, "*.go")) - goFiles2, _ := filepath.Glob(filepath.Join(dir, "**", "*.go")) - goFiles = append(goFiles, goFiles2...) - - // write special constants to packages so they can be imported - for _, file := range goFiles { - ifile := filepath.Join(filepath.Dir(file), importMarkerFile) - if _, err := os.Stat(ifile); err != nil { - pkgName := filepath.Base(filepath.Dir(file)) - if pkgName == "_test" { - continue - } - fullPkg := assembleImportPath(file) - - if fullPkg == "" { - return fmt.Errorf("could not determine package path for %s", file) - } - - info.Imports = append(info.Imports, fullPkg) - - src := fmt.Sprintf("package %s\nvar IMPORT_MARKER = true\n", pkgName) - err = ioutil.WriteFile(ifile, []byte(src), 0664) - if err != nil { - return err - } - } - } - - // write main test stub - os.MkdirAll(filepath.Join(dir, "_test"), 0777) - f, err := os.Create(filepath.Join(dir, "_test", testFile)) - if err != nil { - return err - } - tplMain.Execute(f, info) - f.Close() - - // now run the command - tfile := "./" + filepath.ToSlash(dir) + "/_test/" + testFile - var cmd *exec.Cmd - if len(goBuildTags) > 0 { - cmd = exec.Command("go", "run", "-tags", goBuildTags, tfile) - } else { - cmd = exec.Command("go", "run", tfile) - } - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - buildCleanup(dir) - os.Exit(1) - } - - return nil -} - -// ToSlash is being used to coerce the different -// os PathSeparators into the forward slash -// as the forward slash is required by Go's import statement -func assembleImportPath(file string) string { - a, _ := filepath.Abs(filepath.Dir(file)) - absPath, fullPkg := filepath.ToSlash(a), "" - for _, p := range filepath.SplitList(os.Getenv("GOPATH")) { - a, _ = filepath.Abs(p) - p = filepath.ToSlash(a) - if strings.HasPrefix(absPath, p) { - prefixPath := filepath.ToSlash(filepath.Join(p, "src")) - rpath, _ := filepath.Rel(prefixPath, absPath) - fullPkg = filepath.ToSlash(rpath) - break - } - } - return fullPkg -} - -type buildInfo struct { - Imports []string - FeaturesPath string - Filters []string -} - -func buildCleanup(dir string) { - g, _ := filepath.Glob(filepath.Join(dir, importMarkerFile)) - g2, _ := filepath.Glob(filepath.Join(dir, "**", importMarkerFile)) - - g = append(g, g2...) 
- for _, d := range g { - os.Remove(d) - } - - p := filepath.Join(dir, "_test") - if _, err := os.Stat(p); err == nil { - os.RemoveAll(p) - } -} - -var tplMain = template.Must(template.New("main").Parse(` -package main - -import ( - "github.com/gucumber/gucumber" - {{range $n, $i := .Imports}}_i{{$n}} "{{$i}}" - {{end}} -) - -var ( - {{range $n, $i := .Imports}}_ci{{$n}} = _i{{$n}}.IMPORT_MARKER - {{end}} -) - -func main() { - {{if .Filters}} - gucumber.GlobalContext.Filters = []string{ - {{range $_, $f := .Filters}}"{{$f}}", - {{end}} - } - {{end}} - gucumber.GlobalContext.RunDir({{.FeaturesPath}}) -} -`)) diff --git a/vendor/github.com/lsegal/gucumber/run_main.go b/vendor/github.com/lsegal/gucumber/run_main.go deleted file mode 100644 index 564a8f3..0000000 --- a/vendor/github.com/lsegal/gucumber/run_main.go +++ /dev/null @@ -1,44 +0,0 @@ -package gucumber - -import ( - "flag" - "fmt" -) - -type filters []string - -func (f *filters) String() string { - return fmt.Sprint(*f) -} - -func (f *filters) Set(value string) error { - *f = append(*f, value) - return nil -} - -var filterFlag filters -var goBuildTags string - -func init() { - flag.Var(&filterFlag, "tags", "comma-separated list of tags to filter scenarios by") - flag.StringVar(&goBuildTags, "go-tags", "", "space seperated list of tags, wrap in quotes to specify multiple tags") -} - -func RunMain() { - flag.Parse() - - var dir string - if flag.NArg() == 0 { - dir = "internal/features" - } else { - dir = flag.Arg(0) - } - - filt := []string{} - for _, f := range filterFlag { - filt = append(filt, string(f)) - } - if err := BuildAndRunDirWithGoBuildTags(dir, filt, goBuildTags); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/lsegal/gucumber/runner.go b/vendor/github.com/lsegal/gucumber/runner.go deleted file mode 100755 index 413b6ec..0000000 --- a/vendor/github.com/lsegal/gucumber/runner.go +++ /dev/null @@ -1,419 +0,0 @@ -package gucumber - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - "strings" - - "github.com/gucumber/gucumber/gherkin" - "github.com/shiena/ansicolor" -) - -const ( - clrWhite = "0" - clrRed = "31" - clrGreen = "32" - clrYellow = "33" - clrCyan = "36" - - txtUnmatchInt = `(\d+)` - txtUnmatchFloat = `(-?\d+(?:\.\d+)?)` - txtUnmatchStr = `"(.+?)"` -) - -var ( - reUnmatchInt = regexp.MustCompile(txtUnmatchInt) - reUnmatchFloat = regexp.MustCompile(txtUnmatchFloat) - reUnmatchStr = regexp.MustCompile(`(<|").+?("|>)`) - reOutlineVal = regexp.MustCompile(`<(.+?)>`) -) - -type Runner struct { - *Context - Features []*gherkin.Feature - Results []RunnerResult - Unmatched []*gherkin.Step - FailCount int - SkipCount int -} - -type RunnerResult struct { - *TestingT - *gherkin.Feature - *gherkin.Scenario -} - -func (c *Context) RunDir(dir string) (*Runner, error) { - g, _ := filepath.Glob(filepath.Join(dir, "*.feature")) - g2, _ := filepath.Glob(filepath.Join(dir, "**", "*.feature")) - g = append(g, g2...) 
- - runner, err := c.RunFiles(g) - if err != nil { - panic(err) - } - - if len(runner.Unmatched) > 0 { - fmt.Println("Some steps were missing, you can add them by using the following step definition stubs: ") - fmt.Println("") - fmt.Print(runner.MissingMatcherStubs()) - } - - os.Exit(runner.FailCount) - return runner, err -} - -func (c *Context) RunFiles(featureFiles []string) (*Runner, error) { - r := Runner{ - Context: c, - Features: []*gherkin.Feature{}, - Results: []RunnerResult{}, - Unmatched: []*gherkin.Step{}, - } - - for _, file := range featureFiles { - fd, err := os.Open(file) - if err != nil { - return nil, err - } - defer fd.Close() - - b, err := ioutil.ReadAll(fd) - if err != nil { - return nil, err - } - - fs, err := gherkin.ParseFilename(string(b), file) - if err != nil { - return nil, err - } - - for _, f := range fs { - r.Features = append(r.Features, &f) - } - } - - r.run() - return &r, nil -} - -func (c *Runner) MissingMatcherStubs() string { - var buf bytes.Buffer - matches := map[string]bool{} - - buf.WriteString(`import . "github.com/gucumber/gucumber"` + "\n\n") - buf.WriteString("func init() {\n") - - for _, m := range c.Unmatched { - numInts, numFloats, numStrs := 1, 1, 1 - str, args := m.Text, []string{} - str = reUnmatchInt.ReplaceAllStringFunc(str, func(s string) string { - args = append(args, fmt.Sprintf("i%d int", numInts)) - numInts++ - return txtUnmatchInt - }) - str = reUnmatchFloat.ReplaceAllStringFunc(str, func(s string) string { - args = append(args, fmt.Sprintf("s%d float64", numFloats)) - numFloats++ - return txtUnmatchFloat - }) - str = reUnmatchStr.ReplaceAllStringFunc(str, func(s string) string { - args = append(args, fmt.Sprintf("s%d string", numStrs)) - numStrs++ - return txtUnmatchStr - }) - - if len(m.Argument) > 0 { - if m.Argument.IsTabular() { - args = append(args, "table [][]string") - } else { - args = append(args, "data string") - } - } - - // Don't duplicate matchers. This is mostly for scenario outlines. - if matches[str] { - continue - } - matches[str] = true - - fmt.Fprintf(&buf, "\t%s(`^%s$`, func(%s) {\n\t\tT.Skip() // pending\n\t})\n\n", - m.Type, str, strings.Join(args, ", ")) - } - - buf.WriteString("}\n") - return buf.String() -} - -func (c *Runner) run() { - if c.BeforeAllFilter != nil { - c.BeforeAllFilter() - } - for _, f := range c.Features { - c.runFeature(f) - } - if c.AfterAllFilter != nil { - c.AfterAllFilter() - } - - c.line("0;1", "Finished (%d passed, %d failed, %d skipped).\n", - len(c.Results)-c.FailCount-c.SkipCount, c.FailCount, c.SkipCount) -} - -func (c *Runner) runFeature(f *gherkin.Feature) { - if !f.FilterMatched(c.Filters...) { - result := false - - // if any scenarios match, we will run those - for _, s := range f.Scenarios { - if s.FilterMatched(f, c.Filters...) { - result = true - break - } - } - - if !result { - return - } - } - - for k, fn := range c.BeforeFilters { - if f.FilterMatched(strings.Split(k, "|")...) { - fn() - } - } - - if len(f.Tags) > 0 { - c.line(clrCyan, strings.Join([]string(f.Tags), " ")) - } - c.line("0;1", "Feature: %s", f.Title) - - if f.Background.Steps != nil { - c.runScenario("Background", f, &f.Background, false) - } - - for _, s := range f.Scenarios { - c.runScenario("Scenario", f, &s, false) - } - - for k, fn := range c.AfterFilters { - if f.FilterMatched(strings.Split(k, "|")...) { - fn() - } - } -} - -func (c *Runner) runScenario(title string, f *gherkin.Feature, s *gherkin.Scenario, isExample bool) { - if !s.FilterMatched(f, c.Filters...) 
{ - return - } - - for k, fn := range c.BeforeFilters { - if s.FilterMatched(f, strings.Split(k, "|")...) { - fn() - } - } - - if s.Examples != "" { // run scenario outline data - exrows := strings.Split(string(s.Examples), "\n") - - c.line(clrCyan, " "+strings.Join([]string(s.Tags), " ")) - c.fileLine("0;1", " %s Outline: %s", s.Filename, s.Line, s.LongestLine()+1, - title, s.Title) - - for _, step := range s.Steps { - c.fileLine("0;0", " %s %s", step.Filename, step.Line, - s.LongestLine()+1, step.Type, step.Text) - } - - c.line(clrWhite, "") - c.line("0;1", " Examples:") - c.line(clrCyan, " %s", exrows[0]) - - tab := s.Examples.ToTable() - tabmap := tab.ToMap() - for i, rows := 1, len(tab); i < rows; i++ { - other := gherkin.Scenario{ - Filename: s.Filename, - Line: s.Line, - Title: s.Title, - Examples: gherkin.StringData(""), - Steps: []gherkin.Step{}, - } - - for _, step := range s.Steps { - step.Text = reOutlineVal.ReplaceAllStringFunc(step.Text, func(t string) string { - return tabmap[t[1:len(t)-1]][i-1] - }) - other.Steps = append(other.Steps, step) - } - - fc := c.FailCount - clr := clrGreen - c.runScenario(title, f, &other, true) - - if fc != c.FailCount { - clr = clrRed - } - - c.line(clr, " %s", exrows[i]) - } - c.line(clrWhite, "") - - for k, fn := range c.AfterFilters { - if s.FilterMatched(f, strings.Split(k, "|")...) { - fn() - } - } - - return - } - - t := &TestingT{} - skipping := false - clr := clrGreen - - if !isExample { - if len(s.Tags) > 0 { - c.line(clrCyan, " "+strings.Join([]string(s.Tags), " ")) - } - c.fileLine("0;1", " %s: %s", s.Filename, s.Line, s.LongestLine(), - title, s.Title) - } - - for _, step := range s.Steps { - errCount := len(t.errors) - found := false - if !skipping && !t.Failed() { - done := make(chan bool) - go func() { - defer func() { - c.Results = append(c.Results, RunnerResult{t, f, s}) - - if t.Skipped() { - c.SkipCount++ - skipping = true - clr = clrYellow - } else if t.Failed() { - c.FailCount++ - clr = clrRed - } - - done <- true - }() - - f, err := c.Execute(t, step.Text, string(step.Argument)) - if err != nil { - t.Error(err) - } - found = f - - if !f { - t.Skip("no match function for step") - } - }() - <-done - } - - if skipping && !found { - cstep := step - c.Unmatched = append(c.Unmatched, &cstep) - } - - if !isExample { - c.fileLine(clr, " %s %s", step.Filename, step.Line, - s.LongestLine(), step.Type, step.Text) - if len(step.Argument) > 0 { - if !step.Argument.IsTabular() { - c.line(clrWhite, ` """`) - } - for _, l := range strings.Split(string(step.Argument), "\n") { - c.line(clrWhite, " %s", l) - } - if !step.Argument.IsTabular() { - c.line(clrWhite, ` """`) - } - } - } - - if len(t.errors) > errCount { - c.line(clrRed, "\n"+t.errors[len(t.errors)-1].message) - } - } - if !isExample { - c.line(clrWhite, "") - } - - for k, fn := range c.AfterFilters { - if s.FilterMatched(f, strings.Split(k, "|")...) { - fn() - } - } -} - -var writer = ansicolor.NewAnsiColorWriter(os.Stdout) - -func (c *Runner) line(clr, text string, args ...interface{}) { - fmt.Fprintf(writer, "\033[%sm%s\033[0;0m\n", clr, fmt.Sprintf(text, args...)) -} - -func (c *Runner) fileLine(clr, text, filename string, line int, max int, args ...interface{}) { - space, str := "", fmt.Sprintf(text, args...) 
- if l := max + 5 - len(str); l > 0 { - space = strings.Repeat(" ", l) - } - comment := fmt.Sprintf("%s \033[39;0m# %s:%d", space, filename, line) - c.line(clr, "%s%s", str, comment) -} - -type Tester interface { - Errorf(format string, args ...interface{}) - Skip(args ...interface{}) -} - -type TestingT struct { - skipped bool - errors []TestError -} - -type TestError struct { - message string - stack []byte -} - -func (t *TestingT) Errorf(format string, args ...interface{}) { - var buf bytes.Buffer - - str := fmt.Sprintf(format, args...) - sbuf := make([]byte, 8192) - for { - size := runtime.Stack(sbuf, false) - if size < len(sbuf) { - break - } - buf.Write(sbuf[0:size]) - } - - t.errors = append(t.errors, TestError{message: str, stack: buf.Bytes()}) -} - -func (t *TestingT) Skip(args ...interface{}) { - t.skipped = true -} - -func (t *TestingT) Skipped() bool { - return t.skipped -} - -func (t *TestingT) Failed() bool { - return len(t.errors) > 0 -} - -func (t *TestingT) Error(err error) { - t.errors = append(t.errors, TestError{message: err.Error()}) -} diff --git a/vendor/github.com/lsegal/gucumber/stepdef.go b/vendor/github.com/lsegal/gucumber/stepdef.go deleted file mode 100644 index f9314a8..0000000 --- a/vendor/github.com/lsegal/gucumber/stepdef.go +++ /dev/null @@ -1,224 +0,0 @@ -package gucumber - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/gucumber/gucumber/gherkin" -) - -var ( - GlobalContext = Context{ - Steps: []StepDefinition{}, - World: map[string]interface{}{}, - BeforeFilters: map[string]func(){}, - AfterFilters: map[string]func(){}, - Filters: []string{}, - } - - T Tester - - World = GlobalContext.World - - errNoMatchingStepFns = fmt.Errorf("no functions matched step.") -) - -func Given(match string, fn interface{}) { - GlobalContext.Given(match, fn) -} - -func Then(match string, fn interface{}) { - GlobalContext.Then(match, fn) -} - -func When(match string, fn interface{}) { - GlobalContext.When(match, fn) -} - -func And(match string, fn interface{}) { - GlobalContext.And(match, fn) -} - -func Before(filter string, fn func()) { - GlobalContext.Before(filter, fn) -} - -func After(filter string, fn func()) { - GlobalContext.After(filter, fn) -} - -func BeforeMulti(filters []string, fn func()) { - GlobalContext.BeforeMulti(filters, fn) -} - -func AfterMulti(filters []string, fn func()) { - GlobalContext.AfterMulti(filters, fn) -} - -func BeforeAll(fn func()) { - GlobalContext.BeforeAll(fn) -} - -func AfterAll(fn func()) { - GlobalContext.AfterAll(fn) -} - -func Execute(t Tester, line string, arg string) (bool, error) { - return GlobalContext.Execute(t, line, arg) -} - -type Context struct { - Filters []string - World map[string]interface{} - BeforeFilters map[string]func() - AfterFilters map[string]func() - BeforeAllFilter func() - AfterAllFilter func() - Steps []StepDefinition - T Tester -} - -func (c *Context) addStep(match string, fn interface{}) { - c.Steps = append(c.Steps, StepDefinition{ - Matcher: regexp.MustCompile(match), - Function: reflect.ValueOf(fn), - }) -} - -func (c *Context) Given(match string, fn interface{}) { - c.addStep(match, fn) -} - -func (c *Context) Then(match string, fn interface{}) { - c.addStep(match, fn) -} - -func (c *Context) When(match string, fn interface{}) { - c.addStep(match, fn) -} - -func (c *Context) And(match string, fn interface{}) { - c.addStep(match, fn) -} - -func (c *Context) Before(filter string, fn func()) { - c.BeforeFilters[filter] = fn -} - -func (c *Context) After(filter string, fn 
func()) { - c.AfterFilters[filter] = fn -} - -func (c *Context) BeforeMulti(filters []string, fn func()) { - c.BeforeFilters[strings.Join(filters, "|")] = fn -} - -func (c *Context) AfterMulti(filters []string, fn func()) { - c.AfterFilters[strings.Join(filters, "|")] = fn -} - -func (c *Context) BeforeAll(fn func()) { - c.BeforeAllFilter = fn -} - -func (c *Context) AfterAll(fn func()) { - c.AfterAllFilter = fn -} - -func (c *Context) Execute(t Tester, line string, arg string) (bool, error) { - T = t - c.T = t - - found := false - for _, step := range c.Steps { - f, err := step.CallIfMatch(c, t, line, arg) - if err != nil { - return f, err - } - if f { - found = true - } - } - - return found, nil -} - -type StepDefinition struct { - Matcher *regexp.Regexp - Function reflect.Value -} - -func (s *StepDefinition) CallIfMatch(c *Context, test Tester, line string, arg string) (bool, error) { - if match := s.Matcher.FindStringSubmatch(line); match != nil { - match = match[1:] // discard full line match - - // adjust arity if there is step arg data - numArgs := len(match) - if arg != "" { - numArgs++ - } - - t := s.Function.Type() - if t.NumIn() > 0 && t.In(0).Kind() == reflect.Ptr { - e := t.In(0).Elem() - if e.String() == "testing.T" { - numArgs++ // first param is *testing.T - } - } - if numArgs != t.NumIn() { // function has different arity - return true, fmt.Errorf("matcher function has different arity %d != %d", - numArgs, t.NumIn()) - } - - values := make([]reflect.Value, numArgs) - for m, i := 0, 0; i < t.NumIn(); i++ { - param := t.In(i) - - var v interface{} - switch param.Kind() { - case reflect.Slice: - param = param.Elem() - if param.String() == "gherkin.TabularData" { - v = gherkin.StringData(arg).ToTable() - } else if param.Kind() == reflect.Slice && param.Elem().Kind() == reflect.String { - // just a raw [][]string slice - v = gherkin.StringData(arg).ToTable() - } - case reflect.Ptr: - if param.Elem().String() == "testing.T" { - v = test - } - case reflect.Int: - i, _ := strconv.ParseInt(match[m], 10, 32) - v = int(i) - m++ - case reflect.Int64: - v, _ = strconv.ParseInt(match[m], 10, 64) - m++ - case reflect.String: - // this could be from `arg`, check match index - if m >= len(match) { - v = arg - } else { - v = match[m] - m++ - } - case reflect.Float64: - v, _ = strconv.ParseFloat(match[m], 64) - m++ - } - - if v == nil { - panic("type " + t.String() + "is not supported.") - } - values[i] = reflect.ValueOf(v) - } - - s.Function.Call(values) - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692..0000000 --- a/vendor/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
deleted file mode 100644
index 1e69004..0000000
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# go-isatty
-
-[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
-[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
-    "fmt"
-    "github.com/mattn/go-isatty"
-    "os"
-)
-
-func main() {
-    if isatty.IsTerminal(os.Stdout.Fd()) {
-        fmt.Println("Is Terminal")
-    } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
-        fmt.Println("Is Cygwin/MSYS2 Terminal")
-    } else {
-        fmt.Println("Is Not Terminal")
-    }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-## License
-
-MIT
-
-## Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
-
-## Thanks
-
-* k-takata: base idea for IsCygwinTerminal
-
-  https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
deleted file mode 100644
index 17d4f90..0000000
--- a/vendor/github.com/mattn/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements an interface to isatty
-package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
deleted file mode 100644
index 9584a98..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is a terminal, which
-// is always false on appengine classic, a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
-    return false
-}
-
-// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
-    return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
deleted file mode 100644
index 42f2514..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package isatty
-
-import (
-    "syscall"
-    "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
-    var termios syscall.Termios
-    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-    return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
deleted file mode 100644
index 9d24bac..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux
-// +build !appengine
-
-package isatty
-
-import (
-    "syscall"
-    "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
-    var termios syscall.Termios
-    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-    return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
deleted file mode 100644
index ff4de3d..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !windows
-// +build !appengine
-
-package isatty
-
-// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
-// terminal. This is always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
-    return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
deleted file mode 100644
index 1f0c6bf..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
-    "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
-    var termio unix.Termio
-    err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
-    return err == nil
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
deleted file mode 100644
index af51cbc..0000000
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
-    "strings"
-    "syscall"
-    "unicode/utf16"
-    "unsafe"
-)
-
-const (
-    fileNameInfo uintptr = 2
-    fileTypePipe         = 3
-)
-
-var (
-    kernel32                         = syscall.NewLazyDLL("kernel32.dll")
-    procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
-    procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
-    procGetFileType                  = kernel32.NewProc("GetFileType")
-)
-
-func init() {
-    // Check if GetFileInformationByHandleEx is available.
-    if procGetFileInformationByHandleEx.Find() != nil {
-        procGetFileInformationByHandleEx = nil
-    }
-}
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
-    var st uint32
-    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
-    return r != 0 && e == 0
-}
-
-// Check if the pipe name is used for a cygwin/msys2 pty.
-// Cygwin/MSYS2 PTY has a name like: -// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master -func isCygwinPipeName(name string) bool { - token := strings.Split(name, "-") - if len(token) < 5 { - return false - } - - if token[0] != `\msys` && token[0] != `\cygwin` { - return false - } - - if token[1] == "" { - return false - } - - if !strings.HasPrefix(token[2], "pty") { - return false - } - - if token[3] != `from` && token[3] != `to` { - return false - } - - if token[4] != "master" { - return false - } - - return true -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. -func IsCygwinTerminal(fd uintptr) bool { - if procGetFileInformationByHandleEx == nil { - return false - } - - // Cygwin/msys's pty is a pipe. - ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) - if ft != fileTypePipe || e != 0 { - return false - } - - var buf [2 + syscall.MAX_PATH]uint16 - r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), - 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), - uintptr(len(buf)*2), 0, 0) - if r == 0 || e != 0 { - return false - } - - l := *(*uint32)(unsafe.Pointer(&buf)) - return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) -} diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/mitchellh/cli/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile deleted file mode 100644 index 4874b00..0000000 --- a/vendor/github.com/mitchellh/cli/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code -test: - go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) - -# testrace runs the race checker -testrace: - go list $(TEST) | xargs -n1 go test -race $(TESTARGS) - -# updatedeps installs all the dependencies to run and build -updatedeps: - go list ./... \ - | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ - | grep -v github.com/mitchellh/cli \ - | xargs go get -f -u -v - -.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md deleted file mode 100644 index 8f02cdd..0000000 --- a/vendor/github.com/mitchellh/cli/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) - -cli is a library for implementing powerful command-line interfaces in Go. -cli is the library that powers the CLI for -[Packer](https://github.com/mitchellh/packer), -[Serf](https://github.com/hashicorp/serf), -[Consul](https://github.com/hashicorp/consul), -[Vault](https://github.com/hashicorp/vault), -[Terraform](https://github.com/hashicorp/terraform), and -[Nomad](https://github.com/hashicorp/nomad). - -## Features - -* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. - -* Support for nested subcommands such as `cli foo bar`. - -* Optional support for default subcommands so `cli` does something - other than error. - -* Support for shell autocompletion of subcommands, flags, and arguments - with callbacks in Go. You don't need to write any shell code. - -* Automatic help generation for listing subcommands - -* Automatic help flag recognition of `-h`, `--help`, etc. - -* Automatic version flag recognition of `-v`, `--version`. - -* Helpers for interacting with the terminal, such as outputting information, - asking for input, etc. These are optional, you can always interact with the - terminal however you choose. - -* Use of Go interfaces/types makes augmenting various parts of the library a - piece of cake. - -## Example - -Below is a simple example of creating and running a CLI - -```go -package main - -import ( - "log" - "os" - - "github.com/mitchellh/cli" -) - -func main() { - c := cli.NewCLI("app", "1.0.0") - c.Args = os.Args[1:] - c.Commands = map[string]cli.CommandFactory{ - "foo": fooCommandFactory, - "bar": barCommandFactory, - } - - exitStatus, err := c.Run() - if err != nil { - log.Println(err) - } - - os.Exit(exitStatus) -} -``` - diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go deleted file mode 100644 index 3bec625..0000000 --- a/vendor/github.com/mitchellh/cli/autocomplete.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "github.com/posener/complete/cmd/install" -) - -// autocompleteInstaller is an interface to be implemented to perform the -// autocomplete installation and uninstallation with a CLI. 
-// -// This interface is not exported because it only exists for unit tests -// to be able to test that the installation is called properly. -type autocompleteInstaller interface { - Install(string) error - Uninstall(string) error -} - -// realAutocompleteInstaller uses the real install package to do the -// install/uninstall. -type realAutocompleteInstaller struct{} - -func (i *realAutocompleteInstaller) Install(cmd string) error { - return install.Install(cmd) -} - -func (i *realAutocompleteInstaller) Uninstall(cmd string) error { - return install.Uninstall(cmd) -} - -// mockAutocompleteInstaller is used for tests to record the install/uninstall. -type mockAutocompleteInstaller struct { - InstallCalled bool - UninstallCalled bool -} - -func (i *mockAutocompleteInstaller) Install(cmd string) error { - i.InstallCalled = true - return nil -} - -func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { - i.UninstallCalled = true - return nil -} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go deleted file mode 100644 index 273fbc3..0000000 --- a/vendor/github.com/mitchellh/cli/cli.go +++ /dev/null @@ -1,664 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "regexp" - "sort" - "strings" - "sync" - "text/template" - - "github.com/armon/go-radix" - "github.com/posener/complete" -) - -// CLI contains the state necessary to run subcommands and parse the -// command line arguments. -// -// CLI also supports nested subcommands, such as "cli foo bar". To use -// nested subcommands, the key in the Commands mapping below contains the -// full subcommand. In this example, it would be "foo bar". -// -// If you use a CLI with nested subcommands, some semantics change due to -// ambiguities: -// -// * We use longest prefix matching to find a matching subcommand. This -// means if you register "foo bar" and the user executes "cli foo qux", -// the "foo" command will be executed with the arg "qux". It is up to -// you to handle these args. One option is to just return the special -// help return code `RunResultHelp` to display help and exit. -// -// * The help flag "-h" or "-help" will look at all args to determine -// the help function. For example: "otto apps list -h" will show the -// help for "apps list" but "otto apps -h" will show it for "apps". -// In the normal CLI, only the first subcommand is used. -// -// * The help flag will list any subcommands that a command takes -// as well as the command's help itself. If there are no subcommands, -// it will note this. If the CLI itself has no subcommands, this entire -// section is omitted. -// -// * Any parent commands that don't exist are automatically created as -// no-op commands that just show help for other subcommands. For example, -// if you only register "foo bar", then "foo" is automatically created. -// -type CLI struct { - // Args is the list of command-line arguments received excluding - // the name of the app. For example, if the command "./cli foo bar" - // was invoked, then Args should be []string{"foo", "bar"}. - Args []string - - // Commands is a mapping of subcommand names to a factory function - // for creating that Command implementation. If there is a command - // with a blank string "", then it will be used as the default command - // if no subcommand is specified. - // - // If the key has a space in it, this will create a nested subcommand. - // For example, if the key is "foo bar", then to access it our CLI - // must be accessed with "./cli foo bar". 
See the docs for CLI for
-	// notes on how this changes some other behavior of the CLI as well.
-	Commands map[string]CommandFactory
-
-	// Name defines the name of the CLI.
-	Name string
-
-	// Version of the CLI.
-	Version string
-
-	// Autocomplete enables or disables subcommand auto-completion support.
-	// This is enabled by default when NewCLI is called. Otherwise, this
-	// must be enabled explicitly.
-	//
-	// Autocomplete requires the "Name" option to be set on CLI. This name
-	// should be set exactly to the binary name that is autocompleted.
-	//
-	// Autocompletion is supported via the github.com/posener/complete
-	// library. This library supports both bash and zsh. To add support
-	// for other shells, please see that library.
-	//
-	// AutocompleteInstall and AutocompleteUninstall are the global flag
-	// names for installing and uninstalling the autocompletion handlers
-	// for the user's shell. The flag should omit the hyphen(s) in front of
-	// the value. Both single and double hyphens will automatically be supported
-	// for the flag name. These default to `autocomplete-install` and
-	// `autocomplete-uninstall` respectively.
-	//
-	// AutocompleteNoDefaultFlags is a boolean which controls if the default auto-
-	// complete flags like -help and -version are added to the output.
-	//
-	// AutocompleteGlobalFlags are a mapping of global flags for
-	// autocompletion. The help and version flags are automatically added.
-	Autocomplete bool
-	AutocompleteInstall string
-	AutocompleteUninstall string
-	AutocompleteNoDefaultFlags bool
-	AutocompleteGlobalFlags complete.Flags
-	autocompleteInstaller autocompleteInstaller // For tests
-
-	// HelpFunc and HelpWriter are used to output help information, if
-	// requested.
-	//
-	// HelpFunc is the function called to generate the generic help
-	// text that is shown if help must be shown for the CLI that doesn't
-	// pertain to a specific command.
-	//
-	// HelpWriter is the Writer where the help text is outputted to. If
-	// not specified, it will default to Stderr.
-	HelpFunc HelpFunc
-	HelpWriter io.Writer
-
-	//---------------------------------------------------------------
-	// Internal fields set automatically
-
-	once sync.Once
-	autocomplete *complete.Complete
-	commandTree *radix.Tree
-	commandNested bool
-	subcommand string
-	subcommandArgs []string
-	topFlags []string
-
-	// These are true when special global flags are set. We can/should
-	// probably use a bitset for this one day.
-	isHelp bool
-	isVersion bool
-	isAutocompleteInstall bool
-	isAutocompleteUninstall bool
-}
-
-// NewCLI returns a new CLI instance with sensible defaults.
-func NewCLI(app, version string) *CLI {
-	return &CLI{
-		Name: app,
-		Version: version,
-		HelpFunc: BasicHelpFunc(app),
-		Autocomplete: true,
-	}
-
-}
-
-// IsHelp returns whether or not the help flag is present within the
-// arguments.
-func (c *CLI) IsHelp() bool {
-	c.once.Do(c.init)
-	return c.isHelp
-}
-
-// IsVersion returns whether or not the version flag is present within the
-// arguments.
-func (c *CLI) IsVersion() bool {
-	c.once.Do(c.init)
-	return c.isVersion
-}
-
-// Run runs the actual CLI based on the arguments given.
-func (c *CLI) Run() (int, error) {
-	c.once.Do(c.init)
-
-	// If this is an autocompletion request, satisfy it. This must be called
-	// first before anything else since it's possible to be autocompleting
-	// -help or -version or other flags and we want to show completions
-	// and not actually write the help or version.
-	if c.Autocomplete && c.autocomplete.Complete() {
-		return 0, nil
-	}
-
-	// Just show the version and exit if instructed.
-	if c.IsVersion() && c.Version != "" {
-		c.HelpWriter.Write([]byte(c.Version + "\n"))
-		return 0, nil
-	}
-
-	// Just print the help when only '-h' or '--help' is passed.
-	if c.IsHelp() && c.Subcommand() == "" {
-		c.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + "\n"))
-		return 0, nil
-	}
-
-	// If we're attempting to install or uninstall autocomplete then handle
-	if c.Autocomplete {
-		// Autocomplete requires the "Name" to be set so that we know what
-		// command to set up the autocomplete on.
-		if c.Name == "" {
-			return 1, fmt.Errorf(
-				"internal error: CLI.Name must be specified for autocomplete to work")
-		}
-
-		// If both install and uninstall flags are specified, then error
-		if c.isAutocompleteInstall && c.isAutocompleteUninstall {
-			return 1, fmt.Errorf(
-				"Either the autocomplete install or uninstall flag may " +
-					"be specified, but not both.")
-		}
-
-		// If the install flag is specified, perform the install or uninstall
-		if c.isAutocompleteInstall {
-			if err := c.autocompleteInstaller.Install(c.Name); err != nil {
-				return 1, err
-			}
-
-			return 0, nil
-		}
-
-		if c.isAutocompleteUninstall {
-			if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil {
-				return 1, err
-			}
-
-			return 0, nil
-		}
-	}
-
-	// Attempt to get the factory function for creating the command
-	// implementation. If the command is invalid or blank, it is an error.
-	raw, ok := c.commandTree.Get(c.Subcommand())
-	if !ok {
-		c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n"))
-		return 1, nil
-	}
-
-	command, err := raw.(CommandFactory)()
-	if err != nil {
-		return 1, err
-	}
-
-	// If we've been instructed to just print the help, then print it
-	if c.IsHelp() {
-		c.commandHelp(command)
-		return 0, nil
-	}
-
-	// If there is an invalid flag, then error
-	if len(c.topFlags) > 0 {
-		c.HelpWriter.Write([]byte(
-			"Invalid flags before the subcommand. If these flags are for\n" +
-				"the subcommand, please put them after the subcommand.\n\n"))
-		c.commandHelp(command)
-		return 1, nil
-	}
-
-	code := command.Run(c.SubcommandArgs())
-	if code == RunResultHelp {
-		// Requesting help
-		c.commandHelp(command)
-		return 1, nil
-	}
-
-	return code, nil
-}
-
-// Subcommand returns the subcommand that the CLI would execute. For
-// example, a CLI from "--version version --help" would return a Subcommand
-// of "version".
-func (c *CLI) Subcommand() string {
-	c.once.Do(c.init)
-	return c.subcommand
-}
-
-// SubcommandArgs returns the arguments that will be passed to the
-// subcommand.
-func (c *CLI) SubcommandArgs() []string {
-	c.once.Do(c.init)
-	return c.subcommandArgs
-}
-
-// subcommandParent returns the parent of this subcommand, if there is one.
-// If there isn't one, "" is returned.
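
To see these dispatch rules end to end, here is a minimal, hypothetical sketch (not part of the vendored source); it assumes only the API shown in this file plus the MockCommand helper defined later in the package:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/cli"
)

func main() {
	c := cli.NewCLI("app", "1.0.0")
	// As if invoked: ./app foo bar -flag
	c.Args = []string{"foo", "bar", "-flag"}
	c.Commands = map[string]cli.CommandFactory{
		// A key containing a space registers a nested subcommand; the
		// parent "foo" is auto-created as a help-only command.
		"foo bar": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "runs bar"}, nil
		},
	}

	// Longest-prefix matching resolves the full nested key.
	fmt.Println(c.Subcommand())     // "foo bar"
	fmt.Println(c.SubcommandArgs()) // [-flag]
}
```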
-func (c *CLI) subcommandParent() string {
-	// Get the subcommand; if it is "" already, just return
-	sub := c.Subcommand()
-	if sub == "" {
-		return sub
-	}
-
-	// Clear any trailing spaces and find the last space
-	sub = strings.TrimRight(sub, " ")
-	idx := strings.LastIndex(sub, " ")
-
-	if idx == -1 {
-		// No space means our parent is root
-		return ""
-	}
-
-	return sub[:idx]
-}
-
-func (c *CLI) init() {
-	if c.HelpFunc == nil {
-		c.HelpFunc = BasicHelpFunc("app")
-
-		if c.Name != "" {
-			c.HelpFunc = BasicHelpFunc(c.Name)
-		}
-	}
-
-	if c.HelpWriter == nil {
-		c.HelpWriter = os.Stderr
-	}
-
-	// Build our command tree
-	c.commandTree = radix.New()
-	c.commandNested = false
-	for k, v := range c.Commands {
-		k = strings.TrimSpace(k)
-		c.commandTree.Insert(k, v)
-		if strings.ContainsRune(k, ' ') {
-			c.commandNested = true
-		}
-	}
-
-	// Go through the keys and fill in any missing parent commands
-	if c.commandNested {
-		var walkFn radix.WalkFn
-		toInsert := make(map[string]struct{})
-		walkFn = func(k string, raw interface{}) bool {
-			idx := strings.LastIndex(k, " ")
-			if idx == -1 {
-				// If there is no space, just ignore top level commands
-				return false
-			}
-
-			// Trim up to that space so we can get the expected parent
-			k = k[:idx]
-			if _, ok := c.commandTree.Get(k); ok {
-				// Yay we have the parent!
-				return false
-			}
-
-			// We're missing the parent, so let's insert this
-			toInsert[k] = struct{}{}
-
-			// Call the walk function recursively so we check this one too
-			return walkFn(k, nil)
-		}
-
-		// Walk!
-		c.commandTree.Walk(walkFn)
-
-		// Insert any that we're missing
-		for k := range toInsert {
-			var f CommandFactory = func() (Command, error) {
-				return &MockCommand{
-					HelpText: "This command is accessed by using one of the subcommands below.",
-					RunResult: RunResultHelp,
-				}, nil
-			}
-
-			c.commandTree.Insert(k, f)
-		}
-	}
-
-	// Set up autocomplete if we have it enabled. We have to do this after
-	// the command tree is set up so we can use the radix tree to easily find
-	// all subcommands.
-	if c.Autocomplete {
-		c.initAutocomplete()
-	}
-
-	// Process the args
-	c.processArgs()
-}
-
-func (c *CLI) initAutocomplete() {
-	if c.AutocompleteInstall == "" {
-		c.AutocompleteInstall = defaultAutocompleteInstall
-	}
-
-	if c.AutocompleteUninstall == "" {
-		c.AutocompleteUninstall = defaultAutocompleteUninstall
-	}
-
-	if c.autocompleteInstaller == nil {
-		c.autocompleteInstaller = &realAutocompleteInstaller{}
-	}
-
-	// Build the root command
-	cmd := c.initAutocompleteSub("")
-
-	// For the root, we add the global flags to the "Flags". This way
-	// they don't show up on every command.
-	if !c.AutocompleteNoDefaultFlags {
-		cmd.Flags = map[string]complete.Predictor{
-			"-" + c.AutocompleteInstall: complete.PredictNothing,
-			"-" + c.AutocompleteUninstall: complete.PredictNothing,
-			"-help": complete.PredictNothing,
-			"-version": complete.PredictNothing,
-		}
-	}
-	cmd.GlobalFlags = c.AutocompleteGlobalFlags
-
-	c.autocomplete = complete.New(c.Name, cmd)
-}
-
-// initAutocompleteSub creates the complete.Command for a subcommand with
-// the given prefix. This will continue recursively for all subcommands.
-// The prefix "" (empty string) can be used for the root command.
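
For context, the completion tree built by these functions is derived entirely from the registered command keys. A hedged usage sketch, illustrative rather than from the vendored source:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	// Autocomplete defaults to true in NewCLI; Name must match the
	// installed binary for completion to attach to the right command.
	c := cli.NewCLI("app", "1.0.0")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		"server start": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "starts the server"}, nil
		},
	}

	// "./app -autocomplete-install" installs shell completion (via
	// posener/complete); afterwards "app server <TAB>" offers "start".
	status, err := c.Run()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(status)
}
```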
-func (c *CLI) initAutocompleteSub(prefix string) complete.Command {
-	var cmd complete.Command
-	walkFn := func(k string, raw interface{}) bool {
-		// Keep track of the full key so that we can nest further if necessary
-		fullKey := k
-
-		if len(prefix) > 0 {
-			// If we have a prefix, trim the prefix + 1 (for the space)
-			// Example: turns "sub one" to "one" with prefix "sub"
-			k = k[len(prefix)+1:]
-		}
-
-		if idx := strings.Index(k, " "); idx >= 0 {
-			// If there is a space, we trim up to the space. This turns
-			// "sub sub2 sub3" into "sub". The prefix trim above will
-			// trim our current depth properly.
-			k = k[:idx]
-		}
-
-		if _, ok := cmd.Sub[k]; ok {
-			// If we already tracked this subcommand then ignore
-			return false
-		}
-
-		if cmd.Sub == nil {
-			cmd.Sub = complete.Commands(make(map[string]complete.Command))
-		}
-		subCmd := c.initAutocompleteSub(fullKey)
-
-		// Instantiate the command so that we can check if the command is
-		// a CommandAutocomplete implementation. If there is an error
-		// creating the command, we just ignore it since that will be caught
-		// later.
-		impl, err := raw.(CommandFactory)()
-		if err != nil {
-			impl = nil
-		}
-
-		// Check if it implements CommandAutocomplete. If so, set up the autocomplete
-		if c, ok := impl.(CommandAutocomplete); ok {
-			subCmd.Args = c.AutocompleteArgs()
-			subCmd.Flags = c.AutocompleteFlags()
-		}
-
-		cmd.Sub[k] = subCmd
-		return false
-	}
-
-	walkPrefix := prefix
-	if walkPrefix != "" {
-		walkPrefix += " "
-	}
-
-	c.commandTree.WalkPrefix(walkPrefix, walkFn)
-	return cmd
-}
-
-func (c *CLI) commandHelp(command Command) {
-	// Get the template to use
-	tpl := strings.TrimSpace(defaultHelpTemplate)
-	if t, ok := command.(CommandHelpTemplate); ok {
-		tpl = t.HelpTemplate()
-	}
-	if !strings.HasSuffix(tpl, "\n") {
-		tpl += "\n"
-	}
-
-	// Parse it
-	t, err := template.New("root").Parse(tpl)
-	if err != nil {
-		t = template.Must(template.New("root").Parse(fmt.Sprintf(
-			"Internal error! Failed to parse command help template: %s\n", err)))
-	}
-
-	// Template data
-	data := map[string]interface{}{
-		"Name": c.Name,
-		"Help": command.Help(),
-	}
-
-	// Build subcommand list if we have it
-	var subcommandsTpl []map[string]interface{}
-	if c.commandNested {
-		// Get the matching keys
-		subcommands := c.helpCommands(c.Subcommand())
-		keys := make([]string, 0, len(subcommands))
-		for k := range subcommands {
-			keys = append(keys, k)
-		}
-
-		// Sort the keys
-		sort.Strings(keys)
-
-		// Figure out the padding length
-		var longest int
-		for _, k := range keys {
-			if v := len(k); v > longest {
-				longest = v
-			}
-		}
-
-		// Go through and create their structures
-		subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands))
-		for _, k := range keys {
-			// Get the command
-			raw, ok := subcommands[k]
-			if !ok {
-				c.HelpWriter.Write([]byte(fmt.Sprintf(
-					"Error getting subcommand %q", k)))
-			}
-			sub, err := raw()
-			if err != nil {
-				c.HelpWriter.Write([]byte(fmt.Sprintf(
-					"Error instantiating %q: %s", k, err)))
-			}
-
-			// Find the last space and make sure we only include that last part
-			name := k
-			if idx := strings.LastIndex(k, " "); idx > -1 {
-				name = name[idx+1:]
-			}
-
-			subcommandsTpl = append(subcommandsTpl, map[string]interface{}{
-				"Name": name,
-				"NameAligned": name + strings.Repeat(" ", longest-len(k)),
-				"Help": sub.Help(),
-				"Synopsis": sub.Synopsis(),
-			})
-		}
-	}
-	data["Subcommands"] = subcommandsTpl
-
-	// Write
-	err = t.Execute(c.HelpWriter, data)
-	if err == nil {
-		return
-	}
-
-	// An error, just output...
-	c.HelpWriter.Write([]byte(fmt.Sprintf(
-		"Internal error rendering help: %s", err)))
-}
-
-// helpCommands returns the subcommands for the HelpFunc argument.
-// This will only contain immediate subcommands.
-func (c *CLI) helpCommands(prefix string) map[string]CommandFactory {
-	// If our prefix isn't empty, make sure it ends in ' '
-	if prefix != "" && prefix[len(prefix)-1] != ' ' {
-		prefix += " "
-	}
-
-	// Get all the subkeys of this command
-	var keys []string
-	c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool {
-		// Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar"
-		if !strings.Contains(k[len(prefix):], " ") {
-			keys = append(keys, k)
-		}
-
-		return false
-	})
-
-	// For each of the keys return that in the map
-	result := make(map[string]CommandFactory, len(keys))
-	for _, k := range keys {
-		raw, ok := c.commandTree.Get(k)
-		if !ok {
-			// We just got it via WalkPrefix above, so we just panic
-			panic("not found: " + k)
-		}
-
-		result[k] = raw.(CommandFactory)
-	}
-
-	return result
-}
-
-func (c *CLI) processArgs() {
-	for i, arg := range c.Args {
-		if arg == "--" {
-			break
-		}
-
-		// Check for help flags.
-		if arg == "-h" || arg == "-help" || arg == "--help" {
-			c.isHelp = true
-			continue
-		}
-
-		// Check for autocomplete flags
-		if c.Autocomplete {
-			if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall {
-				c.isAutocompleteInstall = true
-				continue
-			}
-
-			if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall {
-				c.isAutocompleteUninstall = true
-				continue
-			}
-		}
-
-		if c.subcommand == "" {
-			// Check for version flags if not in a subcommand.
-			if arg == "-v" || arg == "-version" || arg == "--version" {
-				c.isVersion = true
-				continue
-			}
-
-			if arg != "" && arg[0] == '-' {
-				// Record the arg...
-				c.topFlags = append(c.topFlags, arg)
-			}
-		}
-
-		// If we didn't find a subcommand yet and this is the first non-flag
-		// argument, then this is our subcommand.
-		if c.subcommand == "" && arg != "" && arg[0] != '-' {
-			c.subcommand = arg
-			if c.commandNested {
-				// Nested CLI, the subcommand is actually the entire
-				// arg list up to a flag that is still a valid subcommand.
-				searchKey := strings.Join(c.Args[i:], " ")
-				k, _, ok := c.commandTree.LongestPrefix(searchKey)
-				if ok {
-					// k could be a prefix that doesn't contain the full
-					// command such as "foo" instead of "foobar", so we
-					// need to verify that we have an entire key. To do that,
-					// we look for an ending in a space or an end of string.
-					reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`)
-					if reVerify.MatchString(searchKey) {
-						c.subcommand = k
-						i += strings.Count(k, " ")
-					}
-				}
-			}
-
-			// The remaining args are the subcommand arguments
-			c.subcommandArgs = c.Args[i+1:]
-		}
-	}
-
-	// If we never found a subcommand and support a default command, then
-	// switch to using that.
-	if c.subcommand == "" {
-		if _, ok := c.Commands[""]; ok {
-			args := c.topFlags
-			args = append(args, c.subcommandArgs...)
-			c.topFlags = nil
-			c.subcommandArgs = args
-		}
-	}
-}
-
-// defaultAutocompleteInstall and defaultAutocompleteUninstall are the
-// default values for the autocomplete install and uninstall flags.
-const defaultAutocompleteInstall = "autocomplete-install" -const defaultAutocompleteUninstall = "autocomplete-uninstall" - -const defaultHelpTemplate = ` -{{.Help}}{{if gt (len .Subcommands) 0}} - -Subcommands: -{{- range $value := .Subcommands }} - {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} -{{- end }} -` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go deleted file mode 100644 index bed11fa..0000000 --- a/vendor/github.com/mitchellh/cli/command.go +++ /dev/null @@ -1,67 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -const ( - // RunResultHelp is a value that can be returned from Run to signal - // to the CLI to render the help output. - RunResultHelp = -18511 -) - -// A command is a runnable sub-command of a CLI. -type Command interface { - // Help should return long-form help text that includes the command-line - // usage, a brief few sentences explaining the function of the command, - // and the complete list of flags the command accepts. - Help() string - - // Run should run the actual command with the given CLI instance and - // command-line arguments. It should return the exit status when it is - // finished. - // - // There are a handful of special exit codes this can return documented - // above that change behavior. - Run(args []string) int - - // Synopsis should return a one-line, short synopsis of the command. - // This should be less than 50 characters ideally. - Synopsis() string -} - -// CommandAutocomplete is an extension of Command that enables fine-grained -// autocompletion. Subcommand autocompletion will work even if this interface -// is not implemented. By implementing this interface, more advanced -// autocompletion is enabled. -type CommandAutocomplete interface { - // AutocompleteArgs returns the argument predictor for this command. - // If argument completion is not supported, this should return - // complete.PredictNothing. - AutocompleteArgs() complete.Predictor - - // AutocompleteFlags returns a mapping of supported flags and autocomplete - // options for this command. The map key for the Flags map should be the - // complete flag such as "-foo" or "--foo". - AutocompleteFlags() complete.Flags -} - -// CommandHelpTemplate is an extension of Command that also has a function -// for returning a template for the help rather than the help itself. In -// this scenario, both Help and HelpTemplate should be implemented. -// -// If CommandHelpTemplate isn't implemented, the Help is output as-is. -type CommandHelpTemplate interface { - // HelpTemplate is the template in text/template format to use for - // displaying the Help. The keys available are: - // - // * ".Help" - The help text itself - // * ".Subcommands" - // - HelpTemplate() string -} - -// CommandFactory is a type of function that is a factory for commands. -// We need a factory because we may need to setup some state on the -// struct that implements the command itself. -type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go deleted file mode 100644 index 7a584b7..0000000 --- a/vendor/github.com/mitchellh/cli/command_mock.go +++ /dev/null @@ -1,63 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -// MockCommand is an implementation of Command that can be used for tests. -// It is publicly exported from this package in case you want to use it -// externally. 
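
As the comment above suggests, external tests can drive a full CLI with this mock. A minimal sketch under that assumption (not part of the vendored source):

```go
package app_test

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestDispatch(t *testing.T) {
	mock := &cli.MockCommand{RunResult: 0}

	c := cli.NewCLI("app", "1.0.0")
	c.Args = []string{"foo", "-flag"}
	c.Commands = map[string]cli.CommandFactory{
		"foo": func() (cli.Command, error) { return mock, nil },
	}

	if status, err := c.Run(); err != nil || status != 0 {
		t.Fatalf("run failed: status=%d err=%v", status, err)
	}

	// The mock records how it was invoked.
	if !mock.RunCalled || len(mock.RunArgs) != 1 || mock.RunArgs[0] != "-flag" {
		t.Fatalf("unexpected invocation: %#v", mock)
	}
}
```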
-type MockCommand struct {
-	// Settable
-	HelpText string
-	RunResult int
-	SynopsisText string
-
-	// Set by the command
-	RunCalled bool
-	RunArgs []string
-}
-
-func (c *MockCommand) Help() string {
-	return c.HelpText
-}
-
-func (c *MockCommand) Run(args []string) int {
-	c.RunCalled = true
-	c.RunArgs = args
-
-	return c.RunResult
-}
-
-func (c *MockCommand) Synopsis() string {
-	return c.SynopsisText
-}
-
-// MockCommandAutocomplete is an implementation of CommandAutocomplete.
-type MockCommandAutocomplete struct {
-	MockCommand
-
-	// Settable
-	AutocompleteArgsValue complete.Predictor
-	AutocompleteFlagsValue complete.Flags
-}
-
-func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor {
-	return c.AutocompleteArgsValue
-}
-
-func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags {
-	return c.AutocompleteFlagsValue
-}
-
-// MockCommandHelpTemplate is an implementation of CommandHelpTemplate.
-type MockCommandHelpTemplate struct {
-	MockCommand
-
-	// Settable
-	HelpTemplateText string
-}
-
-func (c *MockCommandHelpTemplate) HelpTemplate() string {
-	return c.HelpTemplateText
-}
diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go
deleted file mode 100644
index f5ca58f..0000000
--- a/vendor/github.com/mitchellh/cli/help.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package cli
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"sort"
-	"strings"
-)
-
-// HelpFunc is the type of the function that is responsible for generating
-// the help output when the CLI must show the general help text.
-type HelpFunc func(map[string]CommandFactory) string
-
-// BasicHelpFunc generates some basic help output that is usually good enough
-// for most CLI applications.
-func BasicHelpFunc(app string) HelpFunc {
-	return func(commands map[string]CommandFactory) string {
-		var buf bytes.Buffer
-		buf.WriteString(fmt.Sprintf(
-			"Usage: %s [--version] [--help] <command> [<args>]\n\n",
-			app))
-		buf.WriteString("Available commands are:\n")
-
-		// Get the list of keys so we can sort them, and also get the maximum
-		// key length so they can be aligned properly.
-		keys := make([]string, 0, len(commands))
-		maxKeyLen := 0
-		for key := range commands {
-			if len(key) > maxKeyLen {
-				maxKeyLen = len(key)
-			}
-
-			keys = append(keys, key)
-		}
-		sort.Strings(keys)
-
-		for _, key := range keys {
-			commandFunc, ok := commands[key]
-			if !ok {
-				// This should never happen since we JUST built the list of
-				// keys.
-				panic("command not found: " + key)
-			}
-
-			command, err := commandFunc()
-			if err != nil {
-				log.Printf("[ERR] cli: Command '%s' failed to load: %s",
-					key, err)
-				continue
-			}
-
-			key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
-			buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis()))
-		}
-
-		return buf.String()
-	}
-}
-
-// FilteredHelpFunc will filter the commands to only include the keys
-// in the include parameter.
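
A small illustrative composition (not from the vendored source): hide internal commands from the root help while keeping them runnable.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	newMock := func(synopsis string) cli.CommandFactory {
		return func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: synopsis}, nil
		}
	}

	c := cli.NewCLI("app", "1.0.0")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		"serve": newMock("serves traffic"),
		"debug": newMock("internal diagnostics"),
	}

	// Root help lists only "serve"; "app debug" still runs normally.
	c.HelpFunc = cli.FilteredHelpFunc([]string{"serve"}, cli.BasicHelpFunc("app"))

	status, err := c.Run()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(status)
}
```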
-func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { - return func(commands map[string]CommandFactory) string { - set := make(map[string]struct{}) - for _, k := range include { - set[k] = struct{}{} - } - - filtered := make(map[string]CommandFactory) - for k, f := range commands { - if _, ok := set[k]; ok { - filtered[k] = f - } - } - - return f(filtered) - } -} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go deleted file mode 100644 index a2d6f94..0000000 --- a/vendor/github.com/mitchellh/cli/ui.go +++ /dev/null @@ -1,187 +0,0 @@ -package cli - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/signal" - "strings" - - "github.com/bgentry/speakeasy" - "github.com/mattn/go-isatty" -) - -// Ui is an interface for interacting with the terminal, or "interface" -// of a CLI. This abstraction doesn't have to be used, but helps provide -// a simple, layerable way to manage user interactions. -type Ui interface { - // Ask asks the user for input using the given query. The response is - // returned as the given string, or an error. - Ask(string) (string, error) - - // AskSecret asks the user for input using the given query, but does not echo - // the keystrokes to the terminal. - AskSecret(string) (string, error) - - // Output is called for normal standard output. - Output(string) - - // Info is called for information related to the previous output. - // In general this may be the exact same as Output, but this gives - // Ui implementors some flexibility with output formats. - Info(string) - - // Error is used for any error messages that might appear on standard - // error. - Error(string) - - // Warn is used for any warning messages that might appear on standard - // error. - Warn(string) -} - -// BasicUi is an implementation of Ui that just outputs to the given -// writer. This UI is not threadsafe by default, but you can wrap it -// in a ConcurrentUi to make it safe. -type BasicUi struct { - Reader io.Reader - Writer io.Writer - ErrorWriter io.Writer -} - -func (u *BasicUi) Ask(query string) (string, error) { - return u.ask(query, false) -} - -func (u *BasicUi) AskSecret(query string) (string, error) { - return u.ask(query, true) -} - -func (u *BasicUi) ask(query string, secret bool) (string, error) { - if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { - return "", err - } - - // Register for interrupts so that we can catch it and immediately - // return... - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Ask for input in a go-routine so that we can ignore it. - errCh := make(chan error, 1) - lineCh := make(chan string, 1) - go func() { - var line string - var err error - if secret && isatty.IsTerminal(os.Stdin.Fd()) { - line, err = speakeasy.Ask("") - } else { - r := bufio.NewReader(u.Reader) - line, err = r.ReadString('\n') - } - if err != nil { - errCh <- err - return - } - - lineCh <- strings.TrimRight(line, "\r\n") - }() - - select { - case err := <-errCh: - return "", err - case line := <-lineCh: - return line, nil - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. 
- fmt.Fprintln(u.Writer) - - return "", errors.New("interrupted") - } -} - -func (u *BasicUi) Error(message string) { - w := u.Writer - if u.ErrorWriter != nil { - w = u.ErrorWriter - } - - fmt.Fprint(w, message) - fmt.Fprint(w, "\n") -} - -func (u *BasicUi) Info(message string) { - u.Output(message) -} - -func (u *BasicUi) Output(message string) { - fmt.Fprint(u.Writer, message) - fmt.Fprint(u.Writer, "\n") -} - -func (u *BasicUi) Warn(message string) { - u.Error(message) -} - -// PrefixedUi is an implementation of Ui that prefixes messages. -type PrefixedUi struct { - AskPrefix string - AskSecretPrefix string - OutputPrefix string - InfoPrefix string - ErrorPrefix string - WarnPrefix string - Ui Ui -} - -func (u *PrefixedUi) Ask(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskPrefix, query) - } - - return u.Ui.Ask(query) -} - -func (u *PrefixedUi) AskSecret(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) - } - - return u.Ui.AskSecret(query) -} - -func (u *PrefixedUi) Error(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) - } - - u.Ui.Error(message) -} - -func (u *PrefixedUi) Info(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.InfoPrefix, message) - } - - u.Ui.Info(message) -} - -func (u *PrefixedUi) Output(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.OutputPrefix, message) - } - - u.Ui.Output(message) -} - -func (u *PrefixedUi) Warn(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.WarnPrefix, message) - } - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go deleted file mode 100644 index e3d5131..0000000 --- a/vendor/github.com/mitchellh/cli/ui_colored.go +++ /dev/null @@ -1,69 +0,0 @@ -package cli - -import ( - "fmt" -) - -// UiColor is a posix shell color code to use. -type UiColor struct { - Code int - Bold bool -} - -// A list of colors that are useful. These are all non-bolded by default. -var ( - UiColorNone UiColor = UiColor{-1, false} - UiColorRed = UiColor{31, false} - UiColorGreen = UiColor{32, false} - UiColorYellow = UiColor{33, false} - UiColorBlue = UiColor{34, false} - UiColorMagenta = UiColor{35, false} - UiColorCyan = UiColor{36, false} -) - -// ColoredUi is a Ui implementation that colors its output according -// to the given color schemes for the given type of output. 
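
A brief, hypothetical wiring of this type over BasicUi (not part of the vendored source):

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.ColoredUi{
		OutputColor: cli.UiColorNone, // Code -1: leave plain output uncolored
		InfoColor:   cli.UiColorGreen,
		ErrorColor:  cli.UiColorRed,
		WarnColor:   cli.UiColorYellow,
		Ui: &cli.BasicUi{
			Reader:      os.Stdin,
			Writer:      os.Stdout,
			ErrorWriter: os.Stderr,
		},
	}

	ui.Info("starting up")       // green, on stdout
	ui.Warn("low disk space")    // yellow, on stderr via BasicUi.Warn
	ui.Error("something failed") // red, on stderr
}
```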
-type ColoredUi struct {
-	OutputColor UiColor
-	InfoColor UiColor
-	ErrorColor UiColor
-	WarnColor UiColor
-	Ui Ui
-}
-
-func (u *ColoredUi) Ask(query string) (string, error) {
-	return u.Ui.Ask(u.colorize(query, u.OutputColor))
-}
-
-func (u *ColoredUi) AskSecret(query string) (string, error) {
-	return u.Ui.AskSecret(u.colorize(query, u.OutputColor))
-}
-
-func (u *ColoredUi) Output(message string) {
-	u.Ui.Output(u.colorize(message, u.OutputColor))
-}
-
-func (u *ColoredUi) Info(message string) {
-	u.Ui.Info(u.colorize(message, u.InfoColor))
-}
-
-func (u *ColoredUi) Error(message string) {
-	u.Ui.Error(u.colorize(message, u.ErrorColor))
-}
-
-func (u *ColoredUi) Warn(message string) {
-	u.Ui.Warn(u.colorize(message, u.WarnColor))
-}
-
-func (u *ColoredUi) colorize(message string, color UiColor) string {
-	if color.Code == -1 {
-		return message
-	}
-
-	attr := 0
-	if color.Bold {
-		attr = 1
-	}
-
-	return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color.Code, message)
-}
diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go
deleted file mode 100644
index b4f4dbf..0000000
--- a/vendor/github.com/mitchellh/cli/ui_concurrent.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package cli
-
-import (
-	"sync"
-)
-
-// ConcurrentUi is a wrapper around a Ui interface (and implements that
-// interface) making the underlying Ui concurrency safe.
-type ConcurrentUi struct {
-	Ui Ui
-	l sync.Mutex
-}
-
-func (u *ConcurrentUi) Ask(query string) (string, error) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	return u.Ui.Ask(query)
-}
-
-func (u *ConcurrentUi) AskSecret(query string) (string, error) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	return u.Ui.AskSecret(query)
-}
-
-func (u *ConcurrentUi) Error(message string) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	u.Ui.Error(message)
-}
-
-func (u *ConcurrentUi) Info(message string) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	u.Ui.Info(message)
-}
-
-func (u *ConcurrentUi) Output(message string) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	u.Ui.Output(message)
-}
-
-func (u *ConcurrentUi) Warn(message string) {
-	u.l.Lock()
-	defer u.l.Unlock()
-
-	u.Ui.Warn(message)
-}
diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go
deleted file mode 100644
index 0bfe0a1..0000000
--- a/vendor/github.com/mitchellh/cli/ui_mock.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package cli
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"sync"
-)
-
-// NewMockUi returns a fully initialized MockUi instance
-// which is safe for concurrent use.
-func NewMockUi() *MockUi {
-	m := new(MockUi)
-	m.once.Do(m.init)
-	return m
-}
-
-// MockUi is a mock UI that is used for tests and is exported publicly
-// for use in external tests if needed as well. Do not instantiate this
-// directly since the buffers will be initialized on the first write. If
-// there is no write then you will get a nil panic. Please use the
-// NewMockUi() constructor function instead.
You can fix your code with -// -// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go -type MockUi struct { - InputReader io.Reader - ErrorWriter *syncBuffer - OutputWriter *syncBuffer - - once sync.Once -} - -func (u *MockUi) Ask(query string) (string, error) { - u.once.Do(u.init) - - var result string - fmt.Fprint(u.OutputWriter, query) - if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { - return "", err - } - - return result, nil -} - -func (u *MockUi) AskSecret(query string) (string, error) { - return u.Ask(query) -} - -func (u *MockUi) Error(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) Info(message string) { - u.Output(message) -} - -func (u *MockUi) Output(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.OutputWriter, message) - fmt.Fprint(u.OutputWriter, "\n") -} - -func (u *MockUi) Warn(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) init() { - u.ErrorWriter = new(syncBuffer) - u.OutputWriter = new(syncBuffer) -} - -type syncBuffer struct { - sync.RWMutex - b bytes.Buffer -} - -func (b *syncBuffer) Write(data []byte) (int, error) { - b.Lock() - defer b.Unlock() - return b.b.Write(data) -} - -func (b *syncBuffer) Read(data []byte) (int, error) { - b.RLock() - defer b.RUnlock() - return b.b.Read(data) -} - -func (b *syncBuffer) Reset() { - b.Lock() - b.b.Reset() - b.Unlock() -} - -func (b *syncBuffer) String() string { - return string(b.Bytes()) -} - -func (b *syncBuffer) Bytes() []byte { - b.RLock() - data := b.b.Bytes() - b.RUnlock() - return data -} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go deleted file mode 100644 index 1e1db3c..0000000 --- a/vendor/github.com/mitchellh/cli/ui_writer.go +++ /dev/null @@ -1,18 +0,0 @@ -package cli - -// UiWriter is an io.Writer implementation that can be used with -// loggers that writes every line of log output data to a Ui at the -// Info level. -type UiWriter struct { - Ui Ui -} - -func (w *UiWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > 0 && p[n-1] == '\n' { - p = p[:n-1] - } - - w.Ui.Info(string(p)) - return n, nil -} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE deleted file mode 100644 index 2298515..0000000 --- a/vendor/github.com/mitchellh/copystructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md deleted file mode 100644 index bcb8c8d..0000000 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go deleted file mode 100644 index db6a6aa..0000000 --- a/vendor/github.com/mitchellh/copystructure/copier_time.go +++ /dev/null @@ -1,15 +0,0 @@ -package copystructure - -import ( - "reflect" - "time" -) - -func init() { - Copiers[reflect.TypeOf(time.Time{})] = timeCopier -} - -func timeCopier(v interface{}) (interface{}, error) { - // Just... copy it. - return v.(time.Time), nil -} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go deleted file mode 100644 index 1404352..0000000 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ /dev/null @@ -1,548 +0,0 @@ -package copystructure - -import ( - "errors" - "reflect" - "sync" - - "github.com/mitchellh/reflectwalk" -) - -// Copy returns a deep copy of v. -func Copy(v interface{}) (interface{}, error) { - return Config{}.Copy(v) -} - -// CopierFunc is a function that knows how to deep copy a specific type. -// Register these globally with the Copiers variable. -type CopierFunc func(interface{}) (interface{}, error) - -// Copiers is a map of types that behave specially when they are copied. -// If a type is found in this map while deep copying, this function -// will be called to copy it instead of attempting to copy all fields. -// -// The key should be the type, obtained using: reflect.TypeOf(value with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) - -// Must is a helper that wraps a call to a function returning -// (interface{}, error) and panics if the error is non-nil. It is intended -// for use in variable initializations and should only be used when a copy -// error should be a crashing case. -func Must(v interface{}, err error) interface{} { - if err != nil { - panic("copy error: " + err.Error()) - } - - return v -} - -var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") - -type Config struct { - // Lock any types that are a sync.Locker and are not a mutex while copying. - // If there is an RLocker method, use that to get the sync.Locker. - Lock bool - - // Copiers is a map of types associated with a CopierFunc. 
Use the global
-	// Copiers map if this is nil.
-	Copiers map[reflect.Type]CopierFunc
-}
-
-func (c Config) Copy(v interface{}) (interface{}, error) {
-	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
-		return nil, errPointerRequired
-	}
-
-	w := new(walker)
-	if c.Lock {
-		w.useLocks = true
-	}
-
-	if c.Copiers == nil {
-		c.Copiers = Copiers
-	}
-
-	err := reflectwalk.Walk(v, w)
-	if err != nil {
-		return nil, err
-	}
-
-	// Get the result. If the result is nil, then we want to turn it
-	// into a typed nil if we can.
-	result := w.Result
-	if result == nil {
-		val := reflect.ValueOf(v)
-		result = reflect.Indirect(reflect.New(val.Type())).Interface()
-	}
-
-	return result, nil
-}
-
-// Return the key used to index interface types we've seen. Store the number
-// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
-// easy to calculate, easy to match a key with our current depth, and we don't
-// need to deal with initializing and cleaning up nested maps or slices.
-func ifaceKey(pointers, depth int) uint64 {
-	return uint64(pointers)<<32 | uint64(depth)
-}
-
-type walker struct {
-	Result interface{}
-
-	depth int
-	ignoreDepth int
-	vals []reflect.Value
-	cs []reflect.Value
-
-	// This stores the number of pointers we've walked over, indexed by depth.
-	ps []int
-
-	// If an interface is indirected by a pointer, we need to know the type of
-	// interface to create when creating the new value. Store the interface
-	// types here, indexed by both the walk depth and the number of pointers
-	// already seen at that depth. Use ifaceKey to calculate the proper uint64
-	// value.
-	ifaceTypes map[uint64]reflect.Type
-
-	// any locks we've taken, indexed by depth
-	locks []sync.Locker
-	// take locks while walking the structure
-	useLocks bool
-}
-
-func (w *walker) Enter(l reflectwalk.Location) error {
-	w.depth++
-
-	// ensure we have enough elements to index via w.depth
-	for w.depth >= len(w.locks) {
-		w.locks = append(w.locks, nil)
-	}
-
-	for len(w.ps) < w.depth+1 {
-		w.ps = append(w.ps, 0)
-	}
-
-	return nil
-}
-
-func (w *walker) Exit(l reflectwalk.Location) error {
-	locker := w.locks[w.depth]
-	w.locks[w.depth] = nil
-	if locker != nil {
-		defer locker.Unlock()
-	}
-
-	// clear out pointers and interfaces as we exit the stack
-	w.ps[w.depth] = 0
-
-	for k := range w.ifaceTypes {
-		mask := uint64(^uint32(0))
-		if k&mask == uint64(w.depth) {
-			delete(w.ifaceTypes, k)
-		}
-	}
-
-	w.depth--
-	if w.ignoreDepth > w.depth {
-		w.ignoreDepth = 0
-	}
-
-	if w.ignoring() {
-		return nil
-	}
-
-	switch l {
-	case reflectwalk.Array:
-		fallthrough
-	case reflectwalk.Map:
-		fallthrough
-	case reflectwalk.Slice:
-		w.replacePointerMaybe()
-
-		// Pop map off our container
-		w.cs = w.cs[:len(w.cs)-1]
-	case reflectwalk.MapValue:
-		// Pop off the key and value
-		mv := w.valPop()
-		mk := w.valPop()
-		m := w.cs[len(w.cs)-1]
-
-		// If mv is the zero value, SetMapIndex deletes the key from the map,
-		// or in this case never adds it. We need to create a properly typed
-		// zero value so that this key can be set.
- if !mv.IsValid() { - mv = reflect.Zero(m.Elem().Type().Elem()) - } - m.Elem().SetMapIndex(mk, mv) - case reflectwalk.ArrayElem: - // Pop off the value and the index and set it on the array - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - a := w.cs[len(w.cs)-1] - ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call - if ae.CanSet() { - ae.Set(v) - } - } - case reflectwalk.SliceElem: - // Pop off the value and the index and set it on the slice - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - se := s.Elem().Index(i) - if se.CanSet() { - se.Set(v) - } - } - case reflectwalk.Struct: - w.replacePointerMaybe() - - // Remove the struct from the container stack - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.StructField: - // Pop off the value and the field - v := w.valPop() - f := w.valPop().Interface().(reflect.StructField) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - - if sf.CanSet() { - sf.Set(v) - } - } - case reflectwalk.WalkLoc: - // Clear out the slices for GC - w.cs = nil - w.vals = nil - } - - return nil -} - -func (w *walker) Map(m reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(m) - - // Create the map. If the map itself is nil, then just make a nil map - var newMap reflect.Value - if m.IsNil() { - newMap = reflect.New(m.Type()) - } else { - newMap = wrapPtr(reflect.MakeMap(m.Type())) - } - - w.cs = append(w.cs, newMap) - w.valPush(newMap) - return nil -} - -func (w *walker) MapElem(m, k, v reflect.Value) error { - return nil -} - -func (w *walker) PointerEnter(v bool) error { - if v { - w.ps[w.depth]++ - } - return nil -} - -func (w *walker) PointerExit(v bool) error { - if v { - w.ps[w.depth]-- - } - return nil -} - -func (w *walker) Interface(v reflect.Value) error { - if !v.IsValid() { - return nil - } - if w.ifaceTypes == nil { - w.ifaceTypes = make(map[uint64]reflect.Type) - } - - w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() - return nil -} - -func (w *walker) Primitive(v reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(v) - - // IsValid verifies the v is non-zero and CanInterface verifies - // that we're allowed to read this value (unexported fields). - var newV reflect.Value - if v.IsValid() && v.CanInterface() { - newV = reflect.New(v.Type()) - newV.Elem().Set(v) - } - - w.valPush(newV) - w.replacePointerMaybe() - return nil -} - -func (w *walker) Slice(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var newS reflect.Value - if s.IsNil() { - newS = reflect.New(s.Type()) - } else { - newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) - } - - w.cs = append(w.cs, newS) - w.valPush(newS) - return nil -} - -func (w *walker) SliceElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the slice here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Array(a reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(a) - - newA := reflect.New(a.Type()) - - w.cs = append(w.cs, newA) - w.valPush(newA) - return nil -} - -func (w *walker) ArrayElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the array here because elem might still be - // arbitrarily complex. Just record the index and continue on. 
-	w.valPush(reflect.ValueOf(i))
-
-	return nil
-}
-
-func (w *walker) Struct(s reflect.Value) error {
-	if w.ignoring() {
-		return nil
-	}
-	w.lock(s)
-
-	var v reflect.Value
-	if c, ok := Copiers[s.Type()]; ok {
-		// We have a Copier for this struct, so we use that copier to
-		// get the copy, and we ignore anything deeper than this.
-		w.ignoreDepth = w.depth
-
-		dup, err := c(s.Interface())
-		if err != nil {
-			return err
-		}
-
-		// We need to put a pointer to the value on the value stack,
-		// so allocate a new pointer and set it.
-		v = reflect.New(s.Type())
-		reflect.Indirect(v).Set(reflect.ValueOf(dup))
-	} else {
-		// No copier, we copy ourselves and allow reflectwalk to guide
-		// us deeper into the structure for copying.
-		v = reflect.New(s.Type())
-	}
-
-	// Push the value onto the value stack for setting the struct field,
-	// and add the struct itself to the containers stack in case we walk
-	// deeper so that its own fields can be modified.
-	w.valPush(v)
-	w.cs = append(w.cs, v)
-
-	return nil
-}
-
-func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
-	if w.ignoring() {
-		return nil
-	}
-
-	// If PkgPath is non-empty, this is a private (unexported) field.
-	// We do not set this unexported since the Go runtime doesn't allow us.
-	if f.PkgPath != "" {
-		return reflectwalk.SkipEntry
-	}
-
-	// Push the field onto the stack, we'll handle it when we exit
-	// the struct field in Exit...
-	w.valPush(reflect.ValueOf(f))
-	return nil
-}
-
-// ignore causes the walker to ignore any more values until we exit this one
-func (w *walker) ignore() {
-	w.ignoreDepth = w.depth
-}
-
-func (w *walker) ignoring() bool {
-	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
-}
-
-func (w *walker) pointerPeek() bool {
-	return w.ps[w.depth] > 0
-}
-
-func (w *walker) valPop() reflect.Value {
-	result := w.vals[len(w.vals)-1]
-	w.vals = w.vals[:len(w.vals)-1]
-
-	// If we're out of values, that means we popped everything off. In
-	// this case, we reset the result so the next pushed value becomes
-	// the result.
-	if len(w.vals) == 0 {
-		w.Result = nil
-	}
-
-	return result
-}
-
-func (w *walker) valPush(v reflect.Value) {
-	w.vals = append(w.vals, v)
-
-	// If we haven't set the result yet, then this is the result since
-	// it is the first (outermost) value we're seeing.
-	if w.Result == nil && v.IsValid() {
-		w.Result = v.Interface()
-	}
-}
-
-func (w *walker) replacePointerMaybe() {
-	// Determine the last pointer value. If it is NOT a pointer, then
-	// we need to push that onto the stack.
-	if !w.pointerPeek() {
-		w.valPush(reflect.Indirect(w.valPop()))
-		return
-	}
-
-	v := w.valPop()
-
-	// If the expected type is a pointer to an interface of any depth,
-	// such as *interface{}, **interface{}, etc., then we need to convert
-	// the value "v" from *CONCRETE to *interface{} so types match for
-	// Set.
-	//
-	// Example if v is type *Foo where Foo is a struct, v would become
-	// *interface{} instead. This only happens if we have an interface expectation
-	// at this depth.
-	//
-	// For more info, see GH-16
-	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
-		y := reflect.New(iType) // Create *interface{}
-		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
-		v = y // v is now typed *interface{} (where *v = Foo)
-	}
-
-	for i := 1; i < w.ps[w.depth]; i++ {
-		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
-			iface := reflect.New(iType).Elem()
-			iface.Set(v)
-			v = iface
-		}
-
-		p := reflect.New(v.Type())
-		p.Elem().Set(v)
-		v = p
-	}
-
-	w.valPush(v)
-}
-
-// if this value is a Locker, lock it and add it to the locks slice
-func (w *walker) lock(v reflect.Value) {
-	if !w.useLocks {
-		return
-	}
-
-	if !v.IsValid() || !v.CanInterface() {
-		return
-	}
-
-	type rlocker interface {
-		RLocker() sync.Locker
-	}
-
-	var locker sync.Locker
-
-	// We can't call Interface() on a value directly, since that requires
-	// a copy. This is OK, since the pointer to a value which is a sync.Locker
-	// is also a sync.Locker.
-	if v.Kind() == reflect.Ptr {
-		switch l := v.Interface().(type) {
-		case rlocker:
-			// don't lock a mutex directly
-			if _, ok := l.(*sync.RWMutex); !ok {
-				locker = l.RLocker()
-			}
-		case sync.Locker:
-			locker = l
-		}
-	} else if v.CanAddr() {
-		switch l := v.Addr().Interface().(type) {
-		case rlocker:
-			// don't lock a mutex directly
-			if _, ok := l.(*sync.RWMutex); !ok {
-				locker = l.RLocker()
-			}
-		case sync.Locker:
-			locker = l
-		}
-	}
-
-	// still no callable locker
-	if locker == nil {
-		return
-	}
-
-	// don't lock a mutex directly
-	switch locker.(type) {
-	case *sync.Mutex, *sync.RWMutex:
-		return
-	}
-
-	locker.Lock()
-	w.locks[w.depth] = locker
-}
-
-// wrapPtr is a helper that takes v and always makes it *v. copystructure
-// stores things internally as pointers until the last moment before unwrapping.
-func wrapPtr(v reflect.Value) reflect.Value {
-	if !v.IsValid() {
-		return v
-	}
-	vPtr := reflect.New(v.Type())
-	vPtr.Elem().Set(v)
-	return vPtr
-}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
deleted file mode 100644
index f9c841a..0000000
--- a/vendor/github.com/mitchellh/go-homedir/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
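
Stepping back from the walker internals, the public pieces of copystructure compose as in this hypothetical sketch; the Conn type is invented for illustration and mirrors the timeCopier registration shown earlier:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/copystructure"
)

// Conn is an invented example type. Registering a CopierFunc for it means
// the walker's Struct hook calls this function instead of reflecting over
// the (unexported) fields.
type Conn struct{ id int }

func init() {
	copystructure.Copiers[reflect.TypeOf(Conn{})] = func(v interface{}) (interface{}, error) {
		return v.(Conn), nil // a plain value copy suffices here
	}
}

func main() {
	orig := map[string][]Conn{"pool": {{id: 1}}}

	dup := copystructure.Must(copystructure.Copy(orig)).(map[string][]Conn)
	dup["pool"][0] = Conn{id: 99}

	fmt.Println(orig["pool"][0].id) // 1: the map and slice were deep-copied
}
```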
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d..0000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 47e1f9e..0000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,137 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -func dirUnix() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // If that fails, try getent - var stdout bytes.Buffer - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd = exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE deleted file mode 100644 index a3866a2..0000000 --- a/vendor/github.com/mitchellh/go-testing-interface/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md deleted file mode 100644 index 26781bb..0000000 --- a/vendor/github.com/mitchellh/go-testing-interface/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# go-testing-interface - -go-testing-interface is a Go library that exports an interface that -`*testing.T` implements as well as a runtime version you can use in its -place. - -The purpose of this library is so that you can export test helpers as a -public API without depending on the "testing" package, since you can't -create a `*testing.T` struct manually. This lets you, for example, use the -public testing APIs to generate mock data at runtime, rather than just at -test time. - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). 
- -Given a test helper written using `go-testing-interface` like this: - - import "github.com/mitchellh/go-testing-interface" - - func TestHelper(t testing.T) { - t.Fatal("I failed") - } - -You can call the test helper in a real test easily: - - import "testing" - - func TestThing(t *testing.T) { - TestHelper(t) - } - -You can also call the test helper at runtime if needed: - - import "github.com/mitchellh/go-testing-interface" - - func main() { - TestHelper(&testing.RuntimeT{}) - } - -## Why?! - -**Why would I call a test helper that takes a *testing.T at runtime?** - -You probably shouldn't. The only use case I've seen (and I've had) for this -is to implement a "dev mode" for a service where the test helpers are used -to populate mock data, create a mock DB, perhaps run service dependencies -in-memory, etc. - -Outside of a "dev mode", I've never seen a use case for this and I think -there shouldn't be one since the point of the `testing.T` interface is that -you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go deleted file mode 100644 index af6426a..0000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ /dev/null @@ -1,70 +0,0 @@ -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. -type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Log(args ...interface{}) - Logf(format string, args ...interface{}) -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. 
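The RuntimeT doc comment above points out that Error merely records failure while Fatal panics; a small sketch of the runtime "dev mode" pattern it describes — the `seedMockData` helper is hypothetical:

```go
package main

import (
	"log"

	testing "github.com/mitchellh/go-testing-interface"
)

// seedMockData is a hypothetical helper written against the T interface.
func seedMockData(t testing.T) {
	t.Error("mock database unreachable") // records failure, keeps going
}

func main() {
	rt := &testing.RuntimeT{}
	seedMockData(rt)

	// Error only marks the RuntimeT as failed; Fatal would panic instead,
	// so callers outside tests must check Failed() themselves.
	if rt.Failed() {
		log.Println("dev-mode setup reported errors")
	}
}
```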
-type RuntimeT struct { - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.Fail() -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.FailNow() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE deleted file mode 100644 index a3866a2..0000000 --- a/vendor/github.com/mitchellh/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md deleted file mode 100644 index 28ce45a..0000000 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. - - * Tag a slice type struct field to treat it as a set where ordering - doesn't affect the hash code but the field itself is still taken into - account to create the hash value. - - * Optionally specify a custom hash function to optimize for speed, collision - avoidance for your data set, etc. 
- - * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, - allowing effective hashing of time.Time - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/hashstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). - -A quick code example is shown below: - -```go -type ComplexStruct struct { - Name string - Age uint - Metadata map[string]interface{} -} - -v := ComplexStruct{ - Name: "mitchellh", - Age: 64, - Metadata: map[string]interface{}{ - "car": true, - "location": "California", - "siblings": []string{"Bob", "John"}, - }, -} - -hash, err := hashstructure.Hash(v, nil) -if err != nil { - panic(err) -} - -fmt.Printf("%d", hash) -// Output: -// 2307517237273902113 -``` diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go deleted file mode 100644 index ea13a15..0000000 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ /dev/null @@ -1,358 +0,0 @@ -package hashstructure - -import ( - "encoding/binary" - "fmt" - "hash" - "hash/fnv" - "reflect" -) - -// ErrNotStringer is returned when there's an error with hash:"string" -type ErrNotStringer struct { - Field string -} - -// Error implements error for ErrNotStringer -func (ens *ErrNotStringer) Error() string { - return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) -} - -// HashOptions are options that are available for hashing. -type HashOptions struct { - // Hasher is the hash function to use. If this isn't set, it will - // default to FNV. - Hasher hash.Hash64 - - // TagName is the struct tag to look at when hashing the structure. - // By default this is "hash". - TagName string - - // ZeroNil is flag determining if nil pointer should be treated equal - // to a zero value of pointed type. By default this is false. - ZeroNil bool -} - -// Hash returns the hash value of an arbitrary value. -// -// If opts is nil, then default options will be used. See HashOptions -// for the default values. The same *HashOptions value cannot be used -// concurrently. None of the values within a *HashOptions struct are -// safe to read/write while hashing is being done. -// -// Notes on the value: -// -// * Unexported fields on structs are ignored and do not affect the -// hash value. -// -// * Adding an exported field to a struct with the zero value will change -// the hash value. -// -// For structs, the hashing can be controlled using tags. For example: -// -// struct { -// Name string -// UUID string `hash:"ignore"` -// } -// -// The available tag values are: -// -// * "ignore" or "-" - The field will be ignored and not affect the hash code. -// -// * "set" - The field will be treated as a set, where ordering doesn't -// affect the hash code. This only works for slices. 
-// -// * "string" - The field will be hashed as a string, only works when the -// field implements fmt.Stringer -// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string - zeronil bool -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - t := reflect.TypeOf(0) - - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - if w.zeronil { - t = v.Type().Elem() - } - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - v = reflect.Zero(t) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... - switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. 
- var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - parent := v.Interface() - var include Includable - if impl, ok := parent.(Includable); ok { - include = impl - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" || tag == "-" { - // Ignore this field - continue - } - - // if string is set, use the string value - if tag == "string" { - if impl, ok := innerV.Interface().(fmt.Stringer); ok { - innerV = reflect.ValueOf(impl.String()) - } else { - return 0, &ErrNotStringer{ - Field: v.Type().Field(i).Name, - } - } - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, innerV) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(innerV, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. - var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } - -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. 
- e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go deleted file mode 100644 index b6289c0..0000000 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ /dev/null @@ -1,15 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a..0000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 659d688..0000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. 
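The tag values documented for `Hash` above (`ignore`, `set`, `string`) can be illustrated with a short sketch; the `Record` struct and its fields are purely illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/hashstructure"
)

type Record struct {
	Name string
	UUID string   `hash:"ignore"` // excluded from the hash entirely
	Tags []string `hash:"set"`    // element order does not change the hash
}

func main() {
	a := Record{Name: "x", UUID: "1", Tags: []string{"a", "b"}}
	b := Record{Name: "x", UUID: "2", Tags: []string{"b", "a"}}

	ha, err := hashstructure.Hash(a, nil)
	if err != nil {
		log.Fatal(err)
	}
	hb, err := hashstructure.Hash(b, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Same hash: UUID is ignored and Tags is treated as a set.
	fmt.Println(ha == hb)
}
```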
- -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index afcfd5e..0000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,152 +0,0 @@ -package mapstructure - -import ( - "errors" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from, to, data) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - var err error - for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) - if err != nil { - return nil, err - } - - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() - } - } - - return data, nil - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. 
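The "But Why?!" section above sketches decoding JSON once into a map, reading the `type` key, and then decoding the map into the matching struct; a minimal version of that flow, with an illustrative `Person` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	input := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so we can inspect "type".
	var raw map[string]interface{}
	if err := json.Unmarshal(input, &raw); err != nil {
		log.Fatal(err)
	}

	// Second pass: pick the target struct based on the discriminator
	// and let mapstructure handle the field mapping (case-insensitive).
	switch raw["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```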
-func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. -func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5..0000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 6ec5c33..0000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,828 +0,0 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. 
and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. - // - // If an error is returned, the entire decode will fail with that - // error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. 
This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes a map and uses reflection to convert it into the -// given Go native structure. val must be a pointer to a struct. -func Decode(m interface{}, rawVal interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: rawVal, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(raw interface{}) error { - return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { - if data == nil { - // If the data is nil, then we don't set anything. - return nil - } - - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - // If the data value is invalid, then we just set the value - // to be the zero value. - val.Set(reflect.Zero(val.Type())) - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the data. 
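`NewDecoder` and `DecoderConfig` above support decode hooks and metadata tracking; a sketch combining `StringToTimeDurationHookFunc` with `Metadata` — the `Config` struct and input keys are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"extra":   true, // not in Config; reported via Metadata.Unused
	}

	var cfg Config
	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// The hook converts "1m30s" to a time.Duration before assignment.
		DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
		Metadata:   &md,
		Result:     &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Timeout, md.Unused) // 1m30s [extra]
}
```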
- var err error - data, err = DecodeHookExec( - d.config.DecodeHook, - dataVal.Type(), val.Type(), data) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - dataKind := getKind(val) - switch dataKind { - case reflect.Bool: - err = d.decodeBool(name, data, val) - case reflect.Interface: - err = d.decodeBasic(name, data, val) - case reflect.String: - err = d.decodeString(name, data, val) - case reflect.Int: - err = d.decodeInt(name, data, val) - case reflect.Uint: - err = d.decodeUint(name, data, val) - case reflect.Float32: - err = d.decodeFloat(name, data, val) - case reflect.Struct: - err = d.decodeStruct(name, data, val) - case reflect.Map: - err = d.decodeMap(name, data, val) - case reflect.Ptr: - err = d.decodePtr(name, data, val) - case reflect.Slice: - err = d.decodeSlice(name, data, val) - case reflect.Func: - err = d.decodeFunc(name, data, val) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, dataKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else 
{ - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", 
name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if dataVal.Kind() != reflect.Map { - // In weak mode, we accept a slice of maps as an input... - if d.config.WeaklyTypedInput { - switch dataVal.Kind() { - case reflect.Array, reflect.Slice: - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - fmt.Sprintf("%s[%d]", name, i), - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil - } - } - - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } - - // Accumulate errors - errors := make([]string, 0) - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - return nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - - // Make a new slice to hold our result, same size as the original data. - valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } - - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() - - // If "squash" is specified in the tag, we squash the field down. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) - } else { - structs = append(structs, val.FieldByName(fieldType.Name)) - } - continue - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - for fieldType, field := range fields { - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. - continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. 
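The squash handling above flattens embedded structs whose tag contains `,squash`; a brief illustrative sketch (types are hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Base struct {
	ID string
}

type Thing struct {
	Base `mapstructure:",squash"` // Base's fields are read from the top level
	Name string
}

func main() {
	input := map[string]interface{}{
		"id":   "abc-123",
		"name": "widget",
	}

	var t Thing
	if err := mapstructure.Decode(input, &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.ID, t.Name) // abc-123 widget
}
```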
- if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE deleted file mode 100644 index f9c841a..0000000 --- a/vendor/github.com/mitchellh/reflectwalk/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md deleted file mode 100644 index ac82cd2..0000000 --- a/vendor/github.com/mitchellh/reflectwalk/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# reflectwalk - -reflectwalk is a Go library for "walking" a value in Go using reflection, -in the same way a directory tree can be "walked" on the filesystem. Walking -a complex structure can allow you to do manipulations on unknown structures -such as those decoded from JSON. 
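The reflectwalk README above is terse, so here is a minimal sketch of a walker implementing the `PrimitiveWalker` interface defined in the deleted source; the `printWalker` type is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// printWalker implements PrimitiveWalker: Walk calls Primitive for
// every primitive key and value it encounters during traversal.
type printWalker struct{}

func (printWalker) Primitive(v reflect.Value) error {
	fmt.Println("primitive:", v.Interface())
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "example",
		"nested": map[string]interface{}{
			"count": 2,
		},
	}
	// Walk recursively visits the map, its keys, and its values.
	if err := reflectwalk.Walk(data, printWalker{}); err != nil {
		log.Fatal(err)
	}
}
```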
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go deleted file mode 100644 index 6a7f176..0000000 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ /dev/null @@ -1,19 +0,0 @@ -package reflectwalk - -//go:generate stringer -type=Location location.go - -type Location uint - -const ( - None Location = iota - Map - MapKey - MapValue - Slice - SliceElem - Array - ArrayElem - Struct - StructField - WalkLoc -) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go deleted file mode 100644 index d3cfe85..0000000 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=Location location.go; DO NOT EDIT - -package reflectwalk - -import "fmt" - -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" - -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} - -func (i Location) String() string { - if i+1 >= Location(len(_Location_index)) { - return fmt.Sprintf("Location(%d)", i) - } - return _Location_name[_Location_index[i]:_Location_index[i+1]] -} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go deleted file mode 100644 index d7ab7b6..0000000 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ /dev/null @@ -1,401 +0,0 @@ -// reflectwalk is a package that allows you to "walk" complex structures -// similar to how you may "walk" a filesystem: visiting every element one -// by one and calling callback functions allowing you to handle and manipulate -// those elements. -package reflectwalk - -import ( - "errors" - "reflect" -) - -// PrimitiveWalker implementations are able to handle primitive values -// within complex structures. Primitive values are numbers, strings, -// booleans, funcs, chans. -// -// These primitive values are often members of more complex -// structures (slices, maps, etc.) that are walkable by other interfaces. -type PrimitiveWalker interface { - Primitive(reflect.Value) error -} - -// InterfaceWalker implementations are able to handle interface values as they -// are encountered during the walk. -type InterfaceWalker interface { - Interface(reflect.Value) error -} - -// MapWalker implementations are able to handle individual elements -// found within a map structure. -type MapWalker interface { - Map(m reflect.Value) error - MapElem(m, k, v reflect.Value) error -} - -// SliceWalker implementations are able to handle slice elements found -// within complex structures. -type SliceWalker interface { - Slice(reflect.Value) error - SliceElem(int, reflect.Value) error -} - -// ArrayWalker implementations are able to handle array elements found -// within complex structures. -type ArrayWalker interface { - Array(reflect.Value) error - ArrayElem(int, reflect.Value) error -} - -// StructWalker is an interface that has methods that are called for -// structs when a Walk is done. -type StructWalker interface { - Struct(reflect.Value) error - StructField(reflect.StructField, reflect.Value) error -} - -// EnterExitWalker implementations are notified before and after -// they walk deeper into complex structures (into struct fields, -// into slice elements, etc.) 
-type EnterExitWalker interface { - Enter(Location) error - Exit(Location) error -} - -// PointerWalker implementations are notified when the value they're -// walking is a pointer or not. Pointer is called for _every_ value whether -// it is a pointer or not. -type PointerWalker interface { - PointerEnter(bool) error - PointerExit(bool) error -} - -// SkipEntry can be returned from walk functions to skip walking -// the value of this field. This is only valid in the following functions: -// -// - Struct: skips all fields from being walked -// - StructField: skips walking the struct value -// -var SkipEntry = errors.New("skip this entry") - -// Walk takes an arbitrary value and an interface and traverses the -// value, calling callbacks on the interface if they are supported. -// The interface should implement one or more of the walker interfaces -// in this package, such as PrimitiveWalker, StructWalker, etc. -func Walk(data, walker interface{}) (err error) { - v := reflect.ValueOf(data) - ew, ok := walker.(EnterExitWalker) - if ok { - err = ew.Enter(WalkLoc) - } - - if err == nil { - err = walk(v, walker) - } - - if ok && err == nil { - err = ew.Exit(WalkLoc) - } - - return -} - -func walk(v reflect.Value, w interface{}) (err error) { - // Determine if we're receiving a pointer and if so notify the walker. - // The logic here is convoluted but very important (tests will fail if - // almost any part is changed). I will try to explain here. - // - // First, we check if the value is an interface, if so, we really need - // to check the interface's VALUE to see whether it is a pointer. - // - // Check whether the value is then a pointer. If so, then set pointer - // to true to notify the user. - // - // If we still have a pointer or an interface after the indirections, then - // we unwrap another level - // - // At this time, we also set "v" to be the dereferenced value. This is - // because once we've unwrapped the pointer we want to use that value. - pointer := false - pointerV := v - - for { - if pointerV.Kind() == reflect.Interface { - if iw, ok := w.(InterfaceWalker); ok { - if err = iw.Interface(pointerV); err != nil { - return - } - } - - pointerV = pointerV.Elem() - } - - if pointerV.Kind() == reflect.Ptr { - pointer = true - v = reflect.Indirect(pointerV) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return - } - - defer func(pointer bool) { - if err != nil { - return - } - - err = pw.PointerExit(pointer) - }(pointer) - } - - if pointer { - pointerV = v - } - pointer = false - - // If we still have a pointer or interface we have to indirect another level. - switch pointerV.Kind() { - case reflect.Ptr, reflect.Interface: - continue - } - break - } - - // We preserve the original value here because if it is an interface - // type, we want to pass that directly into the walkPrimitive, so that - // we can set it. 
- originalV := v - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - k := v.Kind() - if k >= reflect.Int && k <= reflect.Complex128 { - k = reflect.Int - } - - switch k { - // Primitives - case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: - err = walkPrimitive(originalV, w) - return - case reflect.Map: - err = walkMap(v, w) - return - case reflect.Slice: - err = walkSlice(v, w) - return - case reflect.Struct: - err = walkStruct(v, w) - return - case reflect.Array: - err = walkArray(v, w) - return - default: - panic("unsupported type: " + k.String()) - } -} - -func walkMap(v reflect.Value, w interface{}) error { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Map) - } - - if mw, ok := w.(MapWalker); ok { - if err := mw.Map(v); err != nil { - return err - } - } - - for _, k := range v.MapKeys() { - kv := v.MapIndex(k) - - if mw, ok := w.(MapWalker); ok { - if err := mw.MapElem(v, k, kv); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(MapKey) - } - - if err := walk(k, w); err != nil { - return err - } - - if ok { - ew.Exit(MapKey) - ew.Enter(MapValue) - } - - if err := walk(kv, w); err != nil { - return err - } - - if ok { - ew.Exit(MapValue) - } - } - - if ewok { - ew.Exit(Map) - } - - return nil -} - -func walkPrimitive(v reflect.Value, w interface{}) error { - if pw, ok := w.(PrimitiveWalker); ok { - return pw.Primitive(v) - } - - return nil -} - -func walkSlice(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Slice) - } - - if sw, ok := w.(SliceWalker); ok { - if err := sw.Slice(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if sw, ok := w.(SliceWalker); ok { - if err := sw.SliceElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(SliceElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(SliceElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Slice) - } - - return nil -} - -func walkArray(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Array) - } - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.Array(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.ArrayElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(ArrayElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(ArrayElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Array) - } - - return nil -} - -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) - } - - skip := false - if sw, ok := w.(StructWalker); ok { - err = sw.Struct(v) - if err == SkipEntry { - skip = true - err = nil - } - if err != nil { - return - } - } - - if !skip { - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) - - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - - // SkipEntry just pretends this field doesn't even exist - if err == SkipEntry { - continue - } - - if err != nil { - return - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - - err = walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) - } - } - } 
- - if ewok { - ew.Exit(Struct) - } - - return nil -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. 
This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
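As an aside on the API this hunk deletes: a minimal, runnable sketch of the GetMatchingBlocks contract described above, using only identifiers defined in this vendored file (the import path is the one the patch removes):

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three", "four"}
	b := []string{"zero", "two", "three", "five"}

	m := difflib.NewMatcher(a, b)
	for _, blk := range m.GetMatchingBlocks() {
		// Each Match (A, B, Size) asserts a[A:A+Size] == b[B:B+Size];
		// the final (len(a), len(b), 0) entry is the documented sentinel.
		fmt.Printf("a[%d:%d] == b[%d:%d]\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
	}
}
```

Here the only non-sentinel block is the shared "two", "three" run.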
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
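To make the opcode tags concrete, a short assumed fragment (same import as the sketch above, inside a main):

```go
m := difflib.NewMatcher(
	[]string{"a", "b", "c"},
	[]string{"a", "x", "c"},
)
for _, op := range m.GetOpCodes() {
	// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal);
	// replacing "b" with "x" yields one 'r' opcode between two 'e' opcodes.
	fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
}
```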
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
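The relationship between the three similarity estimates is easy to check with a fragment like this (assumed, using SplitLines from the end of this file):

```go
m := difflib.NewMatcher(
	difflib.SplitLines("a\nb\nc\n"),
	difflib.SplitLines("a\nx\nc\n"),
)
// Each call is cheaper than the previous one and bounds it from above:
// Ratio() <= QuickRatio() <= RealQuickRatio().
fmt.Println(m.Ratio())          // exact similarity in [0, 1]
fmt.Println(m.QuickRatio())     // upper bound on Ratio()
fmt.Println(m.RealQuickRatio()) // upper bound on QuickRatio()
```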
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
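A typical caller of the function below builds a UnifiedDiff value with the fields defined above; a minimal assumed example:

```go
diff := difflib.UnifiedDiff{
	A:        difflib.SplitLines("one\ntwo\nthree\n"),
	B:        difflib.SplitLines("one\n2\nthree\n"),
	FromFile: "a.txt",
	ToFile:   "b.txt",
	Context:  3,
}
text, err := difflib.GetUnifiedDiffString(diff)
if err != nil {
	panic(err) // a real caller would propagate the error
}
fmt.Print(text) // "--- a.txt", "+++ b.txt", then @@ hunks
```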
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
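WriteContextDiff below is symmetric; since ContextDiff shares the UnifiedDiff field layout, an assumed sketch differs only in the type and helper used:

```go
cdiff := difflib.ContextDiff{
	A:        difflib.SplitLines("one\ntwo\nthree\n"),
	B:        difflib.SplitLines("one\n2\nthree\n"),
	FromFile: "a.txt",
	ToFile:   "b.txt",
	Context:  3,
}
text, err := difflib.GetContextDiffString(cdiff)
if err != nil {
	panic(err)
}
fmt.Print(text) // "***"/"---" style headers instead of "---"/"+++"
```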
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt deleted file mode 100644 index 16249b4..0000000 --- a/vendor/github.com/posener/complete/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2017 Eyal Posener - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go deleted file mode 100644 index 73c356d..0000000 --- a/vendor/github.com/posener/complete/args.go +++ /dev/null @@ -1,75 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" -) - -// Args describes command line arguments -type Args struct { - // All lists of all arguments in command line (not including the command itself) - All []string - // Completed lists of all completed arguments in command line, - // If the last one is still being typed - no space after it, - // it won't appear in this list of arguments. - Completed []string - // Last argument in command line, the one being typed, if the last - // character in the command line is a space, this argument will be empty, - // otherwise this would be the last word. - Last string - // LastCompleted is the last argument that was fully typed. - // If the last character in the command line is space, this would be the - // last word, otherwise, it would be the word before that. - LastCompleted string -} - -// Directory gives the directory of the current written -// last argument if it represents a file name being written. -// in case that it is not, we fall back to the current directory. -func (a Args) Directory() string { - if info, err := os.Stat(a.Last); err == nil && info.IsDir() { - return fixPathForm(a.Last, a.Last) - } - dir := filepath.Dir(a.Last) - if info, err := os.Stat(dir); err != nil || !info.IsDir() { - return "./" - } - return fixPathForm(a.Last, dir) -} - -func newArgs(line []string) Args { - completed := removeLast(line[1:]) - return Args{ - All: line[1:], - Completed: completed, - Last: last(line), - LastCompleted: last(completed), - } -} - -func (a Args) from(i int) Args { - if i > len(a.All) { - i = len(a.All) - } - a.All = a.All[i:] - - if i > len(a.Completed) { - i = len(a.Completed) - } - a.Completed = a.Completed[i:] - return a -} - -func removeLast(a []string) []string { - if len(a) > 0 { - return a[:len(a)-1] - } - return a -} - -func last(args []string) (last string) { - if len(args) > 0 { - last = args[len(args)-1] - } - return -} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go deleted file mode 100644 index 7137dee..0000000 --- a/vendor/github.com/posener/complete/cmd/cmd.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package cmd used for command line options for the complete tool -package cmd - -import ( - "errors" - "flag" - "fmt" - "os" - "strings" - - "github.com/posener/complete/cmd/install" -) - -// CLI for command line -type CLI struct { - Name string - InstallName string - UninstallName string - - install bool - uninstall bool - yes bool -} - -const ( - defaultInstallName = "install" - defaultUninstallName = "uninstall" -) - -// Run is used when running complete in command line mode. -// this is used when the complete is not completing words, but to -// install it or uninstall it. -func (f *CLI) Run() bool { - err := f.validate() - if err != nil { - os.Stderr.WriteString(err.Error() + "\n") - os.Exit(1) - } - - switch { - case f.install: - f.prompt() - err = install.Install(f.Name) - case f.uninstall: - f.prompt() - err = install.Uninstall(f.Name) - default: - // non of the action flags matched, - // returning false should make the real program execute - return false - } - - if err != nil { - fmt.Printf("%s failed! 
%s\n", f.action(), err) - os.Exit(3) - } - fmt.Println("Done!") - return true -} - -// prompt use for approval -// exit if approval was not given -func (f *CLI) prompt() { - defer fmt.Println(f.action() + "ing...") - if f.yes { - return - } - fmt.Printf("%s completion for %s? ", f.action(), f.Name) - var answer string - fmt.Scanln(&answer) - - switch strings.ToLower(answer) { - case "y", "yes": - return - default: - fmt.Println("Cancelling...") - os.Exit(1) - } -} - -// AddFlags adds the CLI flags to the flag set. -// If flags is nil, the default command line flags will be taken. -// Pass non-empty strings as installName and uninstallName to override the default -// flag names. -func (f *CLI) AddFlags(flags *flag.FlagSet) { - if flags == nil { - flags = flag.CommandLine - } - - if f.InstallName == "" { - f.InstallName = defaultInstallName - } - if f.UninstallName == "" { - f.UninstallName = defaultUninstallName - } - - if flags.Lookup(f.InstallName) == nil { - flags.BoolVar(&f.install, f.InstallName, false, - fmt.Sprintf("Install completion for %s command", f.Name)) - } - if flags.Lookup(f.UninstallName) == nil { - flags.BoolVar(&f.uninstall, f.UninstallName, false, - fmt.Sprintf("Uninstall completion for %s command", f.Name)) - } - if flags.Lookup("y") == nil { - flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes'") - } -} - -// validate the CLI -func (f *CLI) validate() error { - if f.install && f.uninstall { - return errors.New("Install and uninstall are mutually exclusive") - } - return nil -} - -// action name according to the CLI values. -func (f *CLI) action() string { - switch { - case f.install: - return "Install" - case f.uninstall: - return "Uninstall" - default: - return "unknown" - } -} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go deleted file mode 100644 index a287f99..0000000 --- a/vendor/github.com/posener/complete/cmd/install/bash.go +++ /dev/null @@ -1,32 +0,0 @@ -package install - -import "fmt" - -// (un)install in bash -// basically adds/remove from .bashrc: -// -// complete -C -type bash struct { - rc string -} - -func (b bash) Install(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if lineInFile(b.rc, completeCmd) { - return fmt.Errorf("already installed in %s", b.rc) - } - return appendToFile(b.rc, completeCmd) -} - -func (b bash) Uninstall(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if !lineInFile(b.rc, completeCmd) { - return fmt.Errorf("does not installed in %s", b.rc) - } - - return removeFromFile(b.rc, completeCmd) -} - -func (bash) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go deleted file mode 100644 index fb44b2b..0000000 --- a/vendor/github.com/posener/complete/cmd/install/install.go +++ /dev/null @@ -1,92 +0,0 @@ -package install - -import ( - "errors" - "os" - "os/user" - "path/filepath" - - "github.com/hashicorp/go-multierror" -) - -type installer interface { - Install(cmd, bin string) error - Uninstall(cmd, bin string) error -} - -// Install complete command given: -// cmd: is the command name -func Install(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not found any shells to install") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Install(cmd, bin) - if errI != 
nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -// Uninstall complete command given: -// cmd: is the command name -func Uninstall(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not found any shells to uninstall") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Uninstall(cmd, bin) - if errI != nil { - multierror.Append(err, errI) - } - } - - return err -} - -func installers() (i []installer) { - for _, rc := range [...]string{".bashrc", ".bash_profile"} { - if f := rcFile(rc); f != "" { - i = append(i, bash{f}) - break - } - } - if f := rcFile(".zshrc"); f != "" { - i = append(i, zsh{f}) - } - return -} - -func getBinaryPath() (string, error) { - bin, err := os.Executable() - if err != nil { - return "", err - } - return filepath.Abs(bin) -} - -func rcFile(name string) string { - u, err := user.Current() - if err != nil { - return "" - } - path := filepath.Join(u.HomeDir, name) - if _, err := os.Stat(path); err != nil { - return "" - } - return path -} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go deleted file mode 100644 index 2c8b44c..0000000 --- a/vendor/github.com/posener/complete/cmd/install/utils.go +++ /dev/null @@ -1,118 +0,0 @@ -package install - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" -) - -func lineInFile(name string, lookFor string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - r := bufio.NewReader(f) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - return false - } - if err != nil { - return false - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) - if string(line) == lookFor { - return true - } - prefix = prefix[:0] - } -} - -func appendToFile(name string, content string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) - if err != nil { - return err - } - defer f.Close() - _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) - return err -} - -func removeFromFile(name string, content string) error { - backup := name + ".bck" - err := copyFile(name, backup) - if err != nil { - return err - } - temp, err := removeContentToTempFile(name, content) - if err != nil { - return err - } - - err = copyFile(temp, name) - if err != nil { - return err - } - - return os.Remove(backup) -} - -func removeContentToTempFile(name, content string) (string, error) { - rf, err := os.Open(name) - if err != nil { - return "", err - } - defer rf.Close() - wf, err := ioutil.TempFile("/tmp", "complete-") - if err != nil { - return "", err - } - defer wf.Close() - - r := bufio.NewReader(rf) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - break - } - if err != nil { - return "", err - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) 
- str := string(line) - if str == content { - continue - } - wf.WriteString(str + "\n") - prefix = prefix[:0] - } - return wf.Name(), nil -} - -func copyFile(src string, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return err - } - defer out.Close() - _, err = io.Copy(out, in) - return err -} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go deleted file mode 100644 index 9ece779..0000000 --- a/vendor/github.com/posener/complete/cmd/install/zsh.go +++ /dev/null @@ -1,39 +0,0 @@ -package install - -import "fmt" - -// (un)install in zsh -// basically adds/remove from .zshrc: -// -// autoload -U +X bashcompinit && bashcompinit" -// complete -C -type zsh struct { - rc string -} - -func (z zsh) Install(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if lineInFile(z.rc, completeCmd) { - return fmt.Errorf("already installed in %s", z.rc) - } - - bashCompInit := "autoload -U +X bashcompinit && bashcompinit" - if !lineInFile(z.rc, bashCompInit) { - completeCmd = bashCompInit + "\n" + completeCmd - } - - return appendToFile(z.rc, completeCmd) -} - -func (z zsh) Uninstall(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if !lineInFile(z.rc, completeCmd) { - return fmt.Errorf("does not installed in %s", z.rc) - } - - return removeFromFile(z.rc, completeCmd) -} - -func (zsh) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go deleted file mode 100644 index 6de48e9..0000000 --- a/vendor/github.com/posener/complete/command.go +++ /dev/null @@ -1,118 +0,0 @@ -package complete - -import "github.com/posener/complete/match" - -// Command represents a command line -// It holds the data that enables auto completion of command line -// Command can also be a sub command. -type Command struct { - // Sub is map of sub commands of the current command - // The key refer to the sub command name, and the value is it's - // Command descriptive struct. - Sub Commands - - // Flags is a map of flags that the command accepts. - // The key is the flag name, and the value is it's predictions. - Flags Flags - - // GlobalFlags is a map of flags that the command accepts. - // Global flags that can appear also after a sub command. - GlobalFlags Flags - - // Args are extra arguments that the command accepts, those who are - // given without any flag before. - Args Predictor -} - -// Predict returns all possible predictions for args according to the command struct -func (c *Command) Predict(a Args) (predictions []string) { - predictions, _ = c.predict(a) - return -} - -// Commands is the type of Sub member, it maps a command name to a command struct -type Commands map[string]Command - -// Predict completion of sub command names names according to command line arguments -func (c Commands) Predict(a Args) (prediction []string) { - for sub := range c { - if match.Prefix(sub, a.Last) { - prediction = append(prediction, sub) - } - } - return -} - -// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. 
-type Flags map[string]Predictor - -// Predict completion of flags names according to command line arguments -func (f Flags) Predict(a Args) (prediction []string) { - for flag := range f { - // If the flag starts with a hyphen, we avoid emitting the prediction - // unless the last typed arg contains a hyphen as well. - flagHyphenStart := len(flag) != 0 && flag[0] == '-' - lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' - if flagHyphenStart && !lastHyphenStart { - continue - } - - if match.Prefix(flag, a.Last) { - prediction = append(prediction, flag) - } - } - return -} - -// predict options -// only is set to true if no more options are allowed to be returned -// those are in cases of special flag that has specific completion arguments, -// and other flags or sub commands can't come after it. -func (c *Command) predict(a Args) (options []string, only bool) { - - // search sub commands for predictions first - subCommandFound := false - for i, arg := range a.Completed { - if cmd, ok := c.Sub[arg]; ok { - subCommandFound = true - - // recursive call for sub command - options, only = cmd.predict(a.from(i)) - if only { - return - } - - // We matched so stop searching. Continuing to search can accidentally - // match a subcommand with current set of commands, see issue #46. - break - } - } - - // if last completed word is a global flag that we need to complete - if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to global flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.GlobalFlags.Predict(a)...) - - // if a sub command was entered, we won't add the parent command - // completions and we return here. - if subCommandFound { - return - } - - // if last completed word is a command flag that we need to complete - if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.Sub.Predict(a)...) - options = append(options, c.Flags.Predict(a)...) - if c.Args != nil { - options = append(options, c.Args.Predict(a)...) - } - - return -} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go deleted file mode 100644 index 1df6617..0000000 --- a/vendor/github.com/posener/complete/complete.go +++ /dev/null @@ -1,86 +0,0 @@ -// Package complete provides a tool for bash writing bash completion in go. -// -// Writing bash completion scripts is a hard work. This package provides an easy way -// to create bash completion scripts for any command, and also an easy way to install/uninstall -// the completion of the command. -package complete - -import ( - "flag" - "fmt" - "os" - "strings" - - "github.com/posener/complete/cmd" -) - -const ( - envComplete = "COMP_LINE" - envDebug = "COMP_DEBUG" -) - -// Complete structs define completion for a command with CLI options -type Complete struct { - Command Command - cmd.CLI -} - -// New creates a new complete command. -// name is the name of command we want to auto complete. -// IMPORTANT: it must be the same name - if the auto complete -// completes the 'go' command, name must be equal to "go". -// command is the struct of the command completion. -func New(name string, command Command) *Complete { - return &Complete{ - Command: command, - CLI: cmd.CLI{Name: name}, - } -} - -// Run runs the completion and add installation flags beforehand. 
-// The flags are added to the main flag CommandLine variable. -func (c *Complete) Run() bool { - c.AddFlags(nil) - flag.Parse() - return c.Complete() -} - -// Complete a command from completion line in environment variable, -// and print out the complete options. -// returns success if the completion ran or if the cli matched -// any of the given flags, false otherwise -// For installation: it assumes that flags were added and parsed before -// it was called. -func (c *Complete) Complete() bool { - line, ok := getLine() - if !ok { - // make sure flags parsed, - // in case they were not added in the main program - return c.CLI.Run() - } - Log("Completing line: %s", line) - - a := newArgs(line) - - options := c.Command.Predict(a) - - Log("Completion: %s", options) - output(options) - return true -} - -func getLine() ([]string, bool) { - line := os.Getenv(envComplete) - if line == "" { - return nil, false - } - return strings.Split(line, " "), true -} - -func output(options []string) { - Log("") - // stdout of program defines the complete options - for _, option := range options { - fmt.Println(option) - } -} diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go deleted file mode 100644 index 797a80c..0000000 --- a/vendor/github.com/posener/complete/log.go +++ /dev/null @@ -1,23 +0,0 @@ -package complete - -import ( - "io" - "io/ioutil" - "log" - "os" -) - -// Log is used for debugging purposes -// since complete is running on tab completion, it is nice to -// have logs to the stderr (when writing your own completer) -// to write logs, set the COMP_DEBUG environment variable and -// use complete.Log in the complete program -var Log = getLogger() - -func getLogger() func(format string, args ...interface{}) { - var logfile io.Writer = ioutil.Discard - if os.Getenv(envDebug) != "" { - logfile = os.Stderr - } - return log.New(logfile, "complete ", log.Flags()).Printf -} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go deleted file mode 100644 index 051171e..0000000 --- a/vendor/github.com/posener/complete/match/file.go +++ /dev/null @@ -1,19 +0,0 @@ -package match - -import "strings" - -// File returns true if prefix can match the file -func File(file, prefix string) bool { - // special case for current directory completion - if file == "./" && (prefix == "." || prefix == "") { - return true - } - if prefix == "." && strings.HasPrefix(file, ".") { - return true - } - - file = strings.TrimPrefix(file, "./") - prefix = strings.TrimPrefix(prefix, "./") - - return strings.HasPrefix(file, prefix) -} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go deleted file mode 100644 index 812fcac..0000000 --- a/vendor/github.com/posener/complete/match/match.go +++ /dev/null @@ -1,6 +0,0 @@ -package match - -// Match matches two strings -// it is used for comparing a term to the last typed -// word, the prefix, and see if it is a possible auto complete option. 
-type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go deleted file mode 100644 index 9a01ba6..0000000 --- a/vendor/github.com/posener/complete/match/prefix.go +++ /dev/null @@ -1,9 +0,0 @@ -package match - -import "strings" - -// Prefix is a simple Matcher, if the word is it's prefix, there is a match -// Match returns true if a has the prefix as prefix -func Prefix(long, prefix string) bool { - return strings.HasPrefix(long, prefix) -} diff --git a/vendor/github.com/posener/complete/metalinter.json b/vendor/github.com/posener/complete/metalinter.json deleted file mode 100644 index 799c1d0..0000000 --- a/vendor/github.com/posener/complete/metalinter.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Vendor": true, - "DisableAll": true, - "Enable": [ - "gofmt", - "goimports", - "interfacer", - "goconst", - "misspell", - "unconvert", - "gosimple", - "golint", - "structcheck", - "deadcode", - "vet" - ], - "Exclude": [ - "initTests is unused" - ], - "Deadline": "2m" -} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go deleted file mode 100644 index 8207063..0000000 --- a/vendor/github.com/posener/complete/predict.go +++ /dev/null @@ -1,41 +0,0 @@ -package complete - -// Predictor implements a predict method, in which given -// command line arguments returns a list of options it predicts. -type Predictor interface { - Predict(Args) []string -} - -// PredictOr unions two predicate functions, so that the result predicate -// returns the union of their predication -func PredictOr(predictors ...Predictor) Predictor { - return PredictFunc(func(a Args) (prediction []string) { - for _, p := range predictors { - if p == nil { - continue - } - prediction = append(prediction, p.Predict(a)...) - } - return - }) -} - -// PredictFunc determines what terms can follow a command or a flag -// It is used for auto completion, given last - the last word in the already -// in the command line, what words can complete it. -type PredictFunc func(Args) []string - -// Predict invokes the predict function and implements the Predictor interface -func (p PredictFunc) Predict(a Args) []string { - if p == nil { - return nil - } - return p(a) -} - -// PredictNothing does not expect anything after. -var PredictNothing Predictor - -// PredictAnything expects something, but nothing particular, such as a number -// or arbitrary name. -var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go deleted file mode 100644 index c8adf7e..0000000 --- a/vendor/github.com/posener/complete/predict_files.go +++ /dev/null @@ -1,108 +0,0 @@ -package complete - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/posener/complete/match" -) - -// PredictDirs will search for directories in the given started to be typed -// path, if no path was started to be typed, it will complete to directories -// in the current working directory. -func PredictDirs(pattern string) Predictor { - return files(pattern, false) -} - -// PredictFiles will search for files matching the given pattern in the started to -// be typed path, if no path was started to be typed, it will complete to files that -// match the pattern in the current working directory. -// To match any file, use "*" as pattern. To match go files use "*.go", and so on. 
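Taken together with predict.go above, the file predictors in this file compose into a flag set like the following (flag names are hypothetical, API as vendored):

```go
flags := complete.Flags{
	"-dir":  complete.PredictDirs("*"),     // suggest directory names
	"-file": complete.PredictFiles("*.go"), // suggest files matching *.go
	"-n":    complete.PredictAnything,      // a value is expected, none suggested
	"-q":    complete.PredictNothing,       // boolean flag, takes no value
}
_ = flags
```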
-func PredictFiles(pattern string) Predictor { - return files(pattern, true) -} - -func files(pattern string, allowFiles bool) PredictFunc { - - // search for files according to arguments, - // if only one directory has matched the result, search recursively into - // this directory to give more results. - return func(a Args) (prediction []string) { - prediction = predictFiles(a, pattern, allowFiles) - - // if the number of prediction is not 1, we either have many results or - // have no results, so we return it. - if len(prediction) != 1 { - return - } - - // only try deeper, if the one item is a directory - if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { - return - } - - a.Last = prediction[0] - return predictFiles(a, pattern, allowFiles) - } -} - -func predictFiles(a Args, pattern string, allowFiles bool) []string { - if strings.HasSuffix(a.Last, "/..") { - return nil - } - - dir := a.Directory() - files := listFiles(dir, pattern, allowFiles) - - // add dir if match - files = append(files, dir) - - return PredictFilesSet(files).Predict(a) -} - -// PredictFilesSet predict according to file rules to a given set of file names -func PredictFilesSet(files []string) PredictFunc { - return func(a Args) (prediction []string) { - // add all matching files to prediction - for _, f := range files { - f = fixPathForm(a.Last, f) - - // test matching of file to the argument - if match.File(f, a.Last) { - prediction = append(prediction, f) - } - } - return - } -} - -func listFiles(dir, pattern string, allowFiles bool) []string { - // set of all file names - m := map[string]bool{} - - // list files - if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { - for _, f := range files { - if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { - m[f] = true - } - } - } - - // list directories - if dirs, err := ioutil.ReadDir(dir); err == nil { - for _, d := range dirs { - if d.IsDir() { - m[filepath.Join(dir, d.Name())] = true - } - } - } - - list := make([]string, 0, len(m)) - for k := range m { - list = append(list, k) - } - return list -} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go deleted file mode 100644 index 8fc59d7..0000000 --- a/vendor/github.com/posener/complete/predict_set.go +++ /dev/null @@ -1,19 +0,0 @@ -package complete - -import "github.com/posener/complete/match" - -// PredictSet expects specific set of terms, given in the options argument. 
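PredictSet below composes naturally with PredictOr from predict.go; a small assumed example (flag name and terms are illustrative only):

```go
flags := complete.Flags{
	// Suggest a fixed set of words, or fall back to file names,
	// whichever matches what the user has typed so far.
	"-input": complete.PredictOr(
		complete.PredictSet("-", "stdin"),
		complete.PredictFiles("*"),
	),
}
_ = flags
```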
-func PredictSet(options ...string) Predictor { - return predictSet(options) -} - -type predictSet []string - -func (p predictSet) Predict(a Args) (prediction []string) { - for _, m := range p { - if match.Prefix(m, a.Last) { - prediction = append(prediction, m) - } - } - return -} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md deleted file mode 100644 index 74077e3..0000000 --- a/vendor/github.com/posener/complete/readme.md +++ /dev/null @@ -1,116 +0,0 @@ -# complete - -[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) -[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) -[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) -[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) - -A tool for bash writing bash completion in go. - -Writing bash completion scripts is a hard work. This package provides an easy way -to create bash completion scripts for any command, and also an easy way to install/uninstall -the completion of the command. - -## go command bash completion - -In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line. - -This is an example that uses the `complete` package on the `go` command - the `complete` package -can also be used to implement any completions, see [Usage](#usage). - -### Install - -1. Type in your shell: -``` -go get -u github.com/posener/complete/gocomplete -gocomplete -install -``` - -2. Restart your shell - -Uninstall by `gocomplete -uninstall` - -### Features - -- Complete `go` command, including sub commands and all flags. -- Complete packages names or `.go` files when necessary. -- Complete test names after `-run` flag. - -## complete package - -Supported shells: - -- [x] bash -- [x] zsh - -### Usage - -Assuming you have program called `run` and you want to have bash completion -for it, meaning, if you type `run` then space, then press the `Tab` key, -the shell will suggest relevant complete options. - -In that case, we will create a program called `runcomplete`, a go program, -with a `func main()` and so, that will make the completion of the `run` -program. Once the `runcomplete` will be in a binary form, we could -`runcomplete -install` and that will add to our shell all the bash completion -options for `run`. - -So here it is: - -```go -import "github.com/posener/complete" - -func main() { - - // create a Command object, that represents the command we want - // to complete. - run := complete.Command{ - - // Sub defines a list of sub commands of the program, - // this is recursive, since every command is of type command also. - Sub: complete.Commands{ - - // add a build sub command - "build": complete.Command { - - // define flags of the build sub command - Flags: complete.Flags{ - // build sub command has a flag '-cpus', which - // expects number of cpus after it. in that case - // anything could complete this flag. - "-cpus": complete.PredictAnything, - }, - }, - }, - - // define flags of the 'run' main command - Flags: complete.Flags{ - // a flag -o, which expects a file ending with .out after - // it, the tab completion will auto complete for files matching - // the given pattern. 
- "-o": complete.PredictFiles("*.out"), - }, - - // define global flags of the 'run' main command - // those will show up also when a sub command was entered in the - // command line - GlobalFlags: complete.Flags{ - - // a flag '-h' which does not expects anything after it - "-h": complete.PredictNothing, - }, - } - - // run the command completion, as part of the main() function. - // this triggers the autocompletion when needed. - // name must be exactly as the binary that we want to complete. - complete.New("run", run).Run() -} -``` - -### Self completing program - -In case that the program that we want to complete is written in go we -can make it self completing. - -Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh deleted file mode 100755 index 56bfcf1..0000000 --- a/vendor/github.com/posener/complete/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -v -race -coverprofile=profile.out -covermode=atomic $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go deleted file mode 100644 index 58b8b79..0000000 --- a/vendor/github.com/posener/complete/utils.go +++ /dev/null @@ -1,46 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" -) - -// fixPathForm changes a file name to a relative name -func fixPathForm(last string, file string) string { - // get wording directory for relative name - workDir, err := os.Getwd() - if err != nil { - return file - } - - abs, err := filepath.Abs(file) - if err != nil { - return file - } - - // if last is absolute, return path as absolute - if filepath.IsAbs(last) { - return fixDirPath(abs) - } - - rel, err := filepath.Rel(workDir, abs) - if err != nil { - return file - } - - // fix ./ prefix of path - if rel != "." && strings.HasPrefix(last, ".") { - rel = "./" + rel - } - - return fixDirPath(rel) -} - -func fixDirPath(path string) string { - info, err := os.Stat(path) - if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { - path += "/" - } - return path -} diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 488357b..0000000 --- a/vendor/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2016 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
deleted file mode 100644
index b6aad1c..0000000
--- a/vendor/github.com/satori/go.uuid/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# UUID package for Go language
-
-[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
-[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
-[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
-
-This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both creation and parsing of UUIDs are supported.
-
-It ships with 100% test coverage and benchmarks out of the box.
-
-Supported versions:
-* Version 1, based on timestamp and MAC address (RFC 4122)
-* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
-* Version 3, based on MD5 hashing (RFC 4122)
-* Version 4, based on random numbers (RFC 4122)
-* Version 5, based on SHA-1 hashing (RFC 4122)
-
-## Installation
-
-Use the `go` command:
-
-	$ go get github.com/satori/go.uuid
-
-## Requirements
-
-The UUID package requires Go >= 1.2.
-
-## Example
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/satori/go.uuid"
-)
-
-func main() {
-	// Creating UUID Version 4
-	u1 := uuid.NewV4()
-	fmt.Printf("UUIDv4: %s\n", u1)
-
-	// Parsing UUID from string input
-	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-	if err != nil {
-		fmt.Printf("Something went wrong: %s", err)
-	}
-	fmt.Printf("Successfully parsed: %s", u2)
-}
-```
-
-## Documentation
-
-[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
-
-## Links
-* [RFC 4122](http://tools.ietf.org/html/rfc4122)
-* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
-
-## Copyright
-
-Copyright (C) 2013-2016 by Maxim Bublis.
-
-The UUID package is released under the MIT License.
-See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
deleted file mode 100644
index 295f3fc..0000000
--- a/vendor/github.com/satori/go.uuid/uuid.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright (C) 2013-2015 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// Package uuid provides an implementation of the Universally Unique
-// Identifier (UUID). Supported versions are 1, 3, 4 and 5 (as specified
-// in RFC 4122) and version 2 (as specified in DCE 1.1).
-package uuid
-
-import (
-	"bytes"
-	"crypto/md5"
-	"crypto/rand"
-	"crypto/sha1"
-	"database/sql/driver"
-	"encoding/binary"
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"net"
-	"os"
-	"sync"
-	"time"
-)
-
-// UUID layout variants.
-const (
-	VariantNCS = iota
-	VariantRFC4122
-	VariantMicrosoft
-	VariantFuture
-)
-
-// UUID DCE domains.
-const (
-	DomainPerson = iota
-	DomainGroup
-	DomainOrg
-)
-
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// Used in the String method conversion.
-const dash byte = '-'
-
-// UUID v1/v2 storage.
-var (
-	storageMutex  sync.Mutex
-	storageOnce   sync.Once
-	epochFunc     = unixTimeFunc
-	clockSequence uint16
-	lastTime      uint64
-	hardwareAddr  [6]byte
-	posixUID      = uint32(os.Getuid())
-	posixGID      = uint32(os.Getgid())
-)
-
-// String parse helpers.
-var (
-	urnPrefix  = []byte("urn:uuid:")
-	byteGroups = []int{8, 4, 4, 4, 12}
-)
-
-func initClockSequence() {
-	buf := make([]byte, 2)
-	safeRandom(buf)
-	clockSequence = binary.BigEndian.Uint16(buf)
-}
-
-func initHardwareAddr() {
-	interfaces, err := net.Interfaces()
-	if err == nil {
-		for _, iface := range interfaces {
-			if len(iface.HardwareAddr) >= 6 {
-				copy(hardwareAddr[:], iface.HardwareAddr)
-				return
-			}
-		}
-	}
-
-	// Initialize hardwareAddr randomly in case no real
-	// network interface is available
-	safeRandom(hardwareAddr[:])
-
-	// Set the multicast bit as recommended in RFC 4122
-	hardwareAddr[0] |= 0x01
-}
-
-func initStorage() {
-	initClockSequence()
-	initHardwareAddr()
-}
-
-func safeRandom(dest []byte) {
-	if _, err := rand.Read(dest); err != nil {
-		panic(err)
-	}
-}
-
-// Returns the difference in 100-nanosecond intervals between the
-// UUID epoch (October 15, 1582) and the current time.
-// This is the default epoch calculation function.
-func unixTimeFunc() uint64 {
-	return epochStart + uint64(time.Now().UnixNano()/100)
-}
-
-// UUID representation compliant with the specification
-// described in RFC 4122.
-type UUID [16]byte
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database.
-type NullUUID struct {
-	UUID  UUID
-	Valid bool
-}
-
-// The nil UUID is a special form of UUID that is specified to have all
-// 128 bits set to zero.
-var Nil = UUID{}
-
-// Predefined namespace UUIDs.
-var (
-	NamespaceDNS, _  = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-	NamespaceURL, _  = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
-	NamespaceOID, _  = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
-	NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
-)
-
-// And returns the result of a binary AND of two UUIDs.
-func And(u1 UUID, u2 UUID) UUID {
-	u := UUID{}
-	for i := 0; i < 16; i++ {
-		u[i] = u1[i] & u2[i]
-	}
-	return u
-}
-
-// Or returns the result of a binary OR of two UUIDs.
-func Or(u1 UUID, u2 UUID) UUID {
-	u := UUID{}
-	for i := 0; i < 16; i++ {
-		u[i] = u1[i] | u2[i]
-	}
-	return u
-}
-
-// Equal returns true if u1 and u2 are equal, otherwise it returns false.
-func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() uint { - return uint(u[6] >> 4) -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() uint { - switch { - case (u[8] & 0x80) == 0x00: - return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: - return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: - return VariantMicrosoft - } - return VariantFuture -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - t := text[:] - braced := false - - if bytes.Equal(t[:9], urnPrefix) { - t = t[9:] - } else if t[0] == '{' { - braced = true - t = t[1:] - } - - b := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - if t[0] != '-' { - err = fmt.Errorf("uuid: invalid string format") - return - } - t = t[1:] - } - - if len(t) < byteGroup { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - if i == 4 && len(t) > byteGroup && - ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { - err = fmt.Errorf("uuid: UUID string too long: %s", text) - return - } - - _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) - if err != nil { - return - } - - t = t[byteGroup:] - b = b[byteGroup/2:] - } - - return -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. 
-func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. -func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func getStorage() (uint64, uint16, []byte) { - storageOnce.Do(initStorage) - - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence, hardwareAddr[:] -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. 
-func NewV4() UUID {
-	u := UUID{}
-	safeRandom(u[:])
-	u.SetVersion(4)
-	u.SetVariant()
-
-	return u
-}
-
-// NewV5 returns a UUID based on the SHA-1 hash of a namespace UUID and a name.
-func NewV5(ns UUID, name string) UUID {
-	u := newFromHash(sha1.New(), ns, name)
-	u.SetVersion(5)
-	u.SetVariant()
-
-	return u
-}
-
-// Returns a UUID based on hashing of a namespace UUID and a name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
-	u := UUID{}
-	h.Write(ns[:])
-	h.Write([]byte(name))
-	copy(u[:], h.Sum(nil))
-
-	return u
-}
diff --git a/vendor/github.com/shiena/ansicolor/LICENSE b/vendor/github.com/shiena/ansicolor/LICENSE
deleted file mode 100644
index e58473e..0000000
--- a/vendor/github.com/shiena/ansicolor/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) [2014] [shiena]
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/shiena/ansicolor/README.md b/vendor/github.com/shiena/ansicolor/README.md
deleted file mode 100644
index cf2ae40..0000000
--- a/vendor/github.com/shiena/ansicolor/README.md
+++ /dev/null
@@ -1,101 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/shiena/ansicolor?status.svg)](https://godoc.org/github.com/shiena/ansicolor)
-
-# ansicolor
-
-The ansicolor library provides ANSI color support for the Windows console, similar to ANSICON, for Go.
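-
-A minimal usage sketch: wrap standard output once, then write ANSI-colored text
-through the wrapper. The escape codes used here come from the tables below, and
-the Example section further down shows a fuller program:
-
-```go
-package main
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/shiena/ansicolor"
-)
-
-func main() {
-	// Wrap stdout once; colored output then also works on a Windows console.
-	w := ansicolor.NewAnsiColorWriter(os.Stdout)
-	fmt.Fprintf(w, "\x1b[31mred\x1b[0m and plain text\n")
-}
-```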
- -## Features - -|Escape sequence|Text attributes| -|---------------|----| -|\x1b[0m|All attributes off(color at startup)| -|\x1b[1m|Bold on(enable foreground intensity)| -|\x1b[4m|Underline on| -|\x1b[5m|Blink on(enable background intensity)| -|\x1b[21m|Bold off(disable foreground intensity)| -|\x1b[24m|Underline off| -|\x1b[25m|Blink off(disable background intensity)| - -|Escape sequence|Foreground colors| -|---------------|----| -|\x1b[30m|Black| -|\x1b[31m|Red| -|\x1b[32m|Green| -|\x1b[33m|Yellow| -|\x1b[34m|Blue| -|\x1b[35m|Magenta| -|\x1b[36m|Cyan| -|\x1b[37m|White| -|\x1b[39m|Default(foreground color at startup)| -|\x1b[90m|Light Gray| -|\x1b[91m|Light Red| -|\x1b[92m|Light Green| -|\x1b[93m|Light Yellow| -|\x1b[94m|Light Blue| -|\x1b[95m|Light Magenta| -|\x1b[96m|Light Cyan| -|\x1b[97m|Light White| - -|Escape sequence|Background colors| -|---------------|----| -|\x1b[40m|Black| -|\x1b[41m|Red| -|\x1b[42m|Green| -|\x1b[43m|Yellow| -|\x1b[44m|Blue| -|\x1b[45m|Magenta| -|\x1b[46m|Cyan| -|\x1b[47m|White| -|\x1b[49m|Default(background color at startup)| -|\x1b[100m|Light Gray| -|\x1b[101m|Light Red| -|\x1b[102m|Light Green| -|\x1b[103m|Light Yellow| -|\x1b[104m|Light Blue| -|\x1b[105m|Light Magenta| -|\x1b[106m|Light Cyan| -|\x1b[107m|Light White| - -## Example - -```go -package main - -import ( - "fmt" - "os" - - "github.com/shiena/ansicolor" -) - -func main() { - w := ansicolor.NewAnsiColorWriter(os.Stdout) - text := "%sforeground %sbold%s %sbackground%s\n" - fmt.Fprintf(w, text, "\x1b[31m", "\x1b[1m", "\x1b[21m", "\x1b[41;32m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[32m", "\x1b[1m", "\x1b[21m", "\x1b[42;31m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[33m", "\x1b[1m", "\x1b[21m", "\x1b[43;34m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[34m", "\x1b[1m", "\x1b[21m", "\x1b[44;33m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[35m", "\x1b[1m", "\x1b[21m", "\x1b[45;36m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[36m", "\x1b[1m", "\x1b[21m", "\x1b[46;35m", "\x1b[0m") - fmt.Fprintf(w, text, "\x1b[37m", "\x1b[1m", "\x1b[21m", "\x1b[47;30m", "\x1b[0m") -} -``` - -![screenshot](https://gist.githubusercontent.com/shiena/a1bada24b525314a7d5e/raw/c763aa7cda6e4fefaccf831e2617adc40b6151c7/main.png) - -## See also: - -- https://github.com/daviddengcn/go-colortext -- https://github.com/adoxa/ansicon -- https://github.com/aslakhellesoy/wac -- https://github.com/wsxiaoys/terminal -- https://github.com/mattn/go-colorable - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - diff --git a/vendor/github.com/shiena/ansicolor/ansicolor.go b/vendor/github.com/shiena/ansicolor/ansicolor.go deleted file mode 100644 index 8551345..0000000 --- a/vendor/github.com/shiena/ansicolor/ansicolor.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 shiena Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -// Package ansicolor provides color console in Windows as ANSICON. -package ansicolor - -import "io" - -type outputMode int - -// DiscardNonColorEscSeq supports the divided color escape sequence. -// But non-color escape sequence is not output. -// Please use the OutputNonColorEscSeq If you want to output a non-color -// escape sequences such as ncurses. However, it does not support the divided -// color escape sequence. 
-const (
-	_ outputMode = iota
-	DiscardNonColorEscSeq
-	OutputNonColorEscSeq
-)
-
-// NewAnsiColorWriter creates and initializes a new ansiColorWriter
-// that writes to the io.Writer w.
-// On a Windows console, escape sequences change the foreground and
-// background colors of the text; on other systems, all text is
-// written to w unchanged.
-func NewAnsiColorWriter(w io.Writer) io.Writer {
-	return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq)
-}
-
-// NewModeAnsiColorWriter creates and initializes a new ansiColorWriter
-// with the specified outputMode.
-func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
-	if _, ok := w.(*ansiColorWriter); !ok {
-		return &ansiColorWriter{
-			w:    w,
-			mode: mode,
-		}
-	}
-	return w
-}
diff --git a/vendor/github.com/shiena/ansicolor/ansicolor_ansi.go b/vendor/github.com/shiena/ansicolor/ansicolor_ansi.go
deleted file mode 100644
index f4803bb..0000000
--- a/vendor/github.com/shiena/ansicolor/ansicolor_ansi.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2014 shiena Authors. All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-
-// +build !windows
-
-package ansicolor
-
-import "io"
-
-type ansiColorWriter struct {
-	w    io.Writer
-	mode outputMode
-}
-
-func (cw *ansiColorWriter) Write(p []byte) (int, error) {
-	return cw.w.Write(p)
-}
diff --git a/vendor/github.com/shiena/ansicolor/ansicolor_windows.go b/vendor/github.com/shiena/ansicolor/ansicolor_windows.go
deleted file mode 100644
index ff2ae0e..0000000
--- a/vendor/github.com/shiena/ansicolor/ansicolor_windows.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2014 shiena Authors. All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
- -// +build windows - -package ansicolor - -import ( - "bytes" - "io" - "strings" - "syscall" - "unsafe" -) - -type csiState int - -const ( - outsideCsiCode csiState = iota - firstCsiCode - secondCsiCode -) - -type parseResult int - -const ( - noConsole parseResult = iota - changedColor - unknown -) - -type ansiColorWriter struct { - w io.Writer - mode outputMode - state csiState - paramStartBuf bytes.Buffer - paramBuf bytes.Buffer -} - -const ( - firstCsiChar byte = '\x1b' - secondeCsiChar byte = '[' - separatorChar byte = ';' - sgrCode byte = 'm' -) - -const ( - foregroundBlue = uint16(0x0001) - foregroundGreen = uint16(0x0002) - foregroundRed = uint16(0x0004) - foregroundIntensity = uint16(0x0008) - backgroundBlue = uint16(0x0010) - backgroundGreen = uint16(0x0020) - backgroundRed = uint16(0x0040) - backgroundIntensity = uint16(0x0080) - underscore = uint16(0x8000) - - foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity - backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity -) - -const ( - ansiReset = "0" - ansiIntensityOn = "1" - ansiIntensityOff = "21" - ansiUnderlineOn = "4" - ansiUnderlineOff = "24" - ansiBlinkOn = "5" - ansiBlinkOff = "25" - - ansiForegroundBlack = "30" - ansiForegroundRed = "31" - ansiForegroundGreen = "32" - ansiForegroundYellow = "33" - ansiForegroundBlue = "34" - ansiForegroundMagenta = "35" - ansiForegroundCyan = "36" - ansiForegroundWhite = "37" - ansiForegroundDefault = "39" - - ansiBackgroundBlack = "40" - ansiBackgroundRed = "41" - ansiBackgroundGreen = "42" - ansiBackgroundYellow = "43" - ansiBackgroundBlue = "44" - ansiBackgroundMagenta = "45" - ansiBackgroundCyan = "46" - ansiBackgroundWhite = "47" - ansiBackgroundDefault = "49" - - ansiLightForegroundGray = "90" - ansiLightForegroundRed = "91" - ansiLightForegroundGreen = "92" - ansiLightForegroundYellow = "93" - ansiLightForegroundBlue = "94" - ansiLightForegroundMagenta = "95" - ansiLightForegroundCyan = "96" - ansiLightForegroundWhite = "97" - - ansiLightBackgroundGray = "100" - ansiLightBackgroundRed = "101" - ansiLightBackgroundGreen = "102" - ansiLightBackgroundYellow = "103" - ansiLightBackgroundBlue = "104" - ansiLightBackgroundMagenta = "105" - ansiLightBackgroundCyan = "106" - ansiLightBackgroundWhite = "107" -) - -type drawType int - -const ( - foreground drawType = iota - background -) - -type winColor struct { - code uint16 - drawType drawType -} - -var colorMap = map[string]winColor{ - ansiForegroundBlack: {0, foreground}, - ansiForegroundRed: {foregroundRed, foreground}, - ansiForegroundGreen: {foregroundGreen, foreground}, - ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, - ansiForegroundBlue: {foregroundBlue, foreground}, - ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, - ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, - ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiBackgroundBlack: {0, background}, - ansiBackgroundRed: {backgroundRed, background}, - ansiBackgroundGreen: {backgroundGreen, background}, - ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, - ansiBackgroundBlue: {backgroundBlue, background}, - ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, - ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, - ansiBackgroundWhite: {backgroundRed | backgroundGreen | 
backgroundBlue, background}, - ansiBackgroundDefault: {0, background}, - - ansiLightForegroundGray: {foregroundIntensity, foreground}, - ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, - ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, - ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, - ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, - ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, - ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, - ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiLightBackgroundGray: {backgroundIntensity, background}, - ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, - ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, - ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, - ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, - ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, - ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, - ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - defaultAttr *textAttributes -) - -func init() { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo != nil { - colorMap[ansiForegroundDefault] = winColor{ - screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), - foreground, - } - colorMap[ansiBackgroundDefault] = winColor{ - screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), - background, - } - defaultAttr = convertTextAttr(screenInfo.WAttributes) - } -} - -type coord struct { - X, Y int16 -} - -type smallRect struct { - Left, Top, Right, Bottom int16 -} - -type consoleScreenBufferInfo struct { - DwSize coord - DwCursorPosition coord - WAttributes uint16 - SrWindow smallRect - DwMaximumWindowSize coord -} - -func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { - var csbi consoleScreenBufferInfo - ret, _, _ := procGetConsoleScreenBufferInfo.Call( - hConsoleOutput, - uintptr(unsafe.Pointer(&csbi))) - if ret == 0 { - return nil - } - return &csbi -} - -func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { - ret, _, _ := procSetConsoleTextAttribute.Call( - hConsoleOutput, - uintptr(wAttributes)) - return ret != 0 -} - -type textAttributes struct { - foregroundColor uint16 - backgroundColor uint16 - foregroundIntensity uint16 - backgroundIntensity uint16 - underscore uint16 - otherAttributes uint16 -} - -func convertTextAttr(winAttr uint16) *textAttributes { - fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) - bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) - fgIntensity := winAttr & foregroundIntensity - bgIntensity := winAttr & backgroundIntensity - underline := winAttr & underscore - otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) - return 
&textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} -} - -func convertWinAttr(textAttr *textAttributes) uint16 { - var winAttr uint16 - winAttr |= textAttr.foregroundColor - winAttr |= textAttr.backgroundColor - winAttr |= textAttr.foregroundIntensity - winAttr |= textAttr.backgroundIntensity - winAttr |= textAttr.underscore - winAttr |= textAttr.otherAttributes - return winAttr -} - -func changeColor(param []byte) parseResult { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo == nil { - return noConsole - } - - winAttr := convertTextAttr(screenInfo.WAttributes) - strParam := string(param) - if len(strParam) <= 0 { - strParam = "0" - } - csiParam := strings.Split(strParam, string(separatorChar)) - for _, p := range csiParam { - c, ok := colorMap[p] - switch { - case !ok: - switch p { - case ansiReset: - winAttr.foregroundColor = defaultAttr.foregroundColor - winAttr.backgroundColor = defaultAttr.backgroundColor - winAttr.foregroundIntensity = defaultAttr.foregroundIntensity - winAttr.backgroundIntensity = defaultAttr.backgroundIntensity - winAttr.underscore = 0 - winAttr.otherAttributes = 0 - case ansiIntensityOn: - winAttr.foregroundIntensity = foregroundIntensity - case ansiIntensityOff: - winAttr.foregroundIntensity = 0 - case ansiUnderlineOn: - winAttr.underscore = underscore - case ansiUnderlineOff: - winAttr.underscore = 0 - case ansiBlinkOn: - winAttr.backgroundIntensity = backgroundIntensity - case ansiBlinkOff: - winAttr.backgroundIntensity = 0 - default: - // unknown code - } - case c.drawType == foreground: - winAttr.foregroundColor = c.code - case c.drawType == background: - winAttr.backgroundColor = c.code - } - } - winTextAttribute := convertWinAttr(winAttr) - setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) - - return changedColor -} - -func parseEscapeSequence(command byte, param []byte) parseResult { - if defaultAttr == nil { - return noConsole - } - - switch command { - case sgrCode: - return changeColor(param) - default: - return unknown - } -} - -func (cw *ansiColorWriter) flushBuffer() (int, error) { - return cw.flushTo(cw.w) -} - -func (cw *ansiColorWriter) resetBuffer() (int, error) { - return cw.flushTo(nil) -} - -func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) { - var n1, n2 int - var err error - - startBytes := cw.paramStartBuf.Bytes() - cw.paramStartBuf.Reset() - if w != nil { - n1, err = cw.w.Write(startBytes) - if err != nil { - return n1, err - } - } else { - n1 = len(startBytes) - } - paramBytes := cw.paramBuf.Bytes() - cw.paramBuf.Reset() - if w != nil { - n2, err = cw.w.Write(paramBytes) - if err != nil { - return n1 + n2, err - } - } else { - n2 = len(paramBytes) - } - return n1 + n2, nil -} - -func isParameterChar(b byte) bool { - return ('0' <= b && b <= '9') || b == separatorChar -} - -func (cw *ansiColorWriter) Write(p []byte) (int, error) { - r, nw, first, last := 0, 0, 0, 0 - if cw.mode != DiscardNonColorEscSeq { - cw.state = outsideCsiCode - cw.resetBuffer() - } - - var err error - for i, ch := range p { - switch cw.state { - case outsideCsiCode: - if ch == firstCsiChar { - cw.paramStartBuf.WriteByte(ch) - cw.state = firstCsiCode - } - case firstCsiCode: - switch ch { - case firstCsiChar: - cw.paramStartBuf.WriteByte(ch) - break - case secondeCsiChar: - cw.paramStartBuf.WriteByte(ch) - cw.state = secondCsiCode - last = i - 1 - default: - cw.resetBuffer() - cw.state = outsideCsiCode - } - case secondCsiCode: - if isParameterChar(ch) { - 
cw.paramBuf.WriteByte(ch)
-			} else {
-				nw, err = cw.w.Write(p[first:last])
-				r += nw
-				if err != nil {
-					return r, err
-				}
-				first = i + 1
-				result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
-				if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) {
-					cw.paramBuf.WriteByte(ch)
-					nw, err := cw.flushBuffer()
-					if err != nil {
-						return r, err
-					}
-					r += nw
-				} else {
-					n, _ := cw.resetBuffer()
-					// Add one more to the size of the buffer for the last ch
-					r += n + 1
-				}
-
-				cw.state = outsideCsiCode
-			}
-		default:
-			cw.state = outsideCsiCode
-		}
-	}
-
-	if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode {
-		nw, err = cw.w.Write(p[first:len(p)])
-		r += nw
-	}
-
-	return r, err
-}
diff --git a/vendor/github.com/sky-uk/gonsx/CONTRIBUTING.md b/vendor/github.com/sky-uk/gonsx/CONTRIBUTING.md
deleted file mode 100644
index 90cebaa..0000000
--- a/vendor/github.com/sky-uk/gonsx/CONTRIBUTING.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Contributing Guidelines
-
-Contributions are always welcome, although please search issues and pull requests before adding something new, to avoid duplicating
-efforts and conversations.
-
-
-
-### Rules
-
-All contributions are more than welcome but must be submitted as **pull requests** with the following guidelines:
-
-* Include tests for any new functionality
-* Don't delete any existing tests unless absolutely necessary
-* Don't change too much in one pull request; small pull requests are reviewed and merged quickly and save everybody time.
-* Separate out different changes into different pull requests
-* Please mention in your pull request if your changes are not backwards compatible, so that we can increment the version number appropriately.
-
-## Code Style
-
-* This repository uses go fmt to maintain code style and consistency.
-* Test folder names should be lowercase
-* Test files should also be lowercase and end with `_test.go`
-* We use vendoring for dependency package support.
-* All files and folders should be snake_case.
-
-### Creating Issues
-
-Submit issues to the issue tracker on the appropriate repository for suggestions, recommendations, and bugs.
-Please note: if an issue is urgent, or you feel you can fix it yourself, please feel free to make some changes and submit a pull request. We'd love to see your contributions.
-
-
-### Pull Requests
-
-* Create a new local branch for your work in your fork.
-* As early as possible, create a pull request in the appropriate repository. Make sure you give enough information in the pull request description, and add the label `in-progress` with any other appropriate labels.
-* Once any conflicts have been fixed and you're ready for your code to be reviewed, remove the `in-progress` label and add `merge-ready`.
-* Get a code review. Two of the reviewers must be core maintainers of GoNSX.
-* You need two :shipit: :shipit: left as comments on the pull request.
-* One of the core maintainers will merge the changes and apply appropriate versioning to the release (see below).
-
-### Discussion
-
-For discussion of issues and general project talk, head over to [https://gitter.im/gonsx/Lobby](https://gitter.im/gonsx/Lobby).
-
-### Releases
-
-Declaring formal releases remains the prerogative of the project maintainer. We use Semantic Versioning for releases.
-
-### Changes to this arrangement
-
-This document may also be subject to pull requests or changes by contributors where you believe you have something valuable to add or change.
- -## Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -- (a) The contribution was created in whole or in part by me and I have the right to - submit it under the open source license indicated in the file; or - -- (b) The contribution is based upon previous work that, to the best of my knowledge, is - covered under an appropriate open source license and I have the right under that license - to submit that work with modifications, whether created in whole or in part by me, under - the same open source license (unless I am permitted to submit under a different - license), as indicated in the file; or - -- (c) The contribution was provided directly to me by some other person who certified - (a), (b) or (c) and I have not modified it. - -- (d) I understand and agree that this project and the contribution are public and that a - record of the contribution (including all personal information I submit with it, - including my sign-off) is maintained indefinitely and may be redistributed consistent - with this project or the open source license(s) involved. \ No newline at end of file diff --git a/vendor/github.com/sky-uk/gonsx/Dockerfile b/vendor/github.com/sky-uk/gonsx/Dockerfile deleted file mode 100644 index bb30f52..0000000 --- a/vendor/github.com/sky-uk/gonsx/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM golang:1.8 - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update \ - && apt-get install -y software-properties-common python-pip \ - python-setuptools \ - python-dev \ - build-essential \ - libssl-dev \ - libffi-dev \ - && apt-get install --no-install-suggests --no-install-recommends -y \ - curl \ - git \ - build-essential \ - python-netaddr \ - unzip \ - vim \ - wget \ - && apt-get clean -y \ - && apt-get autoremove -y \ - && rm -rf /var/lib/apt/lists/* /tmp/* - -# reload code -RUN go get github.com/pilu/fresh - -RUN go get -u github.com/kardianos/govendor - -# Grab the source code and add it to the workspace. 
-ENV PATHWORK=/go/src/github.com/sky-uk/gonsx/ -ADD ./ $PATHWORK -WORKDIR $PATHWORK - -RUN govendor sync - -ADD ./docker/* / -RUN chmod 755 /entrypoint.sh -CMD /entrypoint.sh diff --git a/vendor/github.com/sky-uk/gonsx/Jenkinsfile b/vendor/github.com/sky-uk/gonsx/Jenkinsfile deleted file mode 100644 index dd0d3db..0000000 --- a/vendor/github.com/sky-uk/gonsx/Jenkinsfile +++ /dev/null @@ -1,147 +0,0 @@ -#!groovy - -project_name = 'gonsx' -project_owner = 'sky-uk' - -project_src_path = "github.com/${project_owner}/${project_name}" -github_token_id = 'svc-paas-github-access-token-as-text' -github_credentials_id = 'svc-paas-github-deploy-key' - -version_file = 'VERSION' -major_version = null -minor_version = null -patch_version = null - -docker_image = "paas/golang-img:0.8.8" - -// helpers -gitHelper = null -shellHelper = null -goHelper = null -slackHelper = null - -slackChannel = '#ott-paas' - -loadHelpers() - -slackHelper.notificationWrapper(slackChannel, currentBuild, env, true) { - node { - wrap([$class: 'TimestamperBuildWrapper']) { - wrap([$class: 'AnsiColorBuildWrapper']) { - stage 'checkout' - deleteDir() - git_branch = env.BRANCH_NAME - checkout scm - gitHelper.prepareGit('svc-paas-github', 'svc-paas-github@jenkins.paas.int.ovp.bskyb.com') - - stage 'version' - if (autoincVersion()) { - writeFile file: version_file, text: version() - gitHelper.commit(version_file, "bumping to: ${version()}") - } - - echo "Starting pipeline for project: [${project_name}], branch: [${git_branch}], version: [${version()}]" - - stage 'lint' - inContainer { - goHelper.goLint(project_src_path) - } - - stage 'format' - inContainer { - goHelper.goFmt(project_src_path) - } - - stage 'vet' - inContainer { - goHelper.goVet(project_src_path) - } - - stage 'build' - inContainer { - goHelper.goBuild(project_src_path) - } - - stage 'test' - inContainer { - goHelper.goTest(project_src_path) - } - - stage 'coverage' - inContainer { - goHelper.goCoverage(project_src_path) - } - } - } - } - // we only release from master - if (git_branch == 'master' && !gitHelper.isLastCommitFromUser('svc-paas-github')) { - stage 'release' - def approved = true - timeout(time: 2, unit: 'HOURS') { - try { - input message: "Release ${project_name} ${version()} ?" - } catch (InterruptedException _x) { - echo "Releasing not approved in time!" - approved = false - } - } - - if (approved) { - echo "Release has been approved!" 
- node() { - gitHelper.tag(version(), "Jenkins ${env.JOB_NAME} ${env.BUILD_DISPLAY_NAME}") - gitHelper.push(github_credentials_id, git_branch) - - echo "Creating GitHub Release v${version()}" - withCredentials([string(credentialsId: github_token_id, variable: 'GITHUB_TOKEN')]) { - def github_release_response = gitHelper.createGitHubRelease(env.GITHUB_TOKEN, project_owner, project_name, version(), git_branch) - echo "${github_release_response}" - // FIXME: this is not working yet - // echo "Attaching artifacts to GitHub Release v${version()}" - // gitHelper.uploadToGitHubRelease(project_github_token, project_owner, project_name, github_release_response.id, "${pwd()}/coverage.html", 'application/html') - } - } - currentBuild.description = "Released ${version()}" - } - } -} - -def loadHelpers() { - fileLoader.withGit('git@github.com:sky-uk/paas-jenkins-pipelines.git', 'master', github_credentials_id, '') { - this.gitHelper = fileLoader.load('lib/helpers/git') - this.shellHelper = fileLoader.load('lib/helpers/shell') - this.goHelper = fileLoader.load('lib/helpers/go') - this.slackHelper = fileLoader.load('lib/helpers/slack') - } -} - -def autoincVersion() { - current_version = readFile("${pwd()}/${this.version_file}").trim().tokenize(".") - setVersion(current_version[0], current_version[1], current_version[2]) - - if(gitHelper.checkIfTagExists(version())) { - this.patch_version++ - if(gitHelper.checkIfTagExists(version())) { - error "Next patch version (${version()}) already exists!" - } - return true - } - return false -} - -def version() { - return "${this.major_version}.${this.minor_version}.${this.patch_version}" -} - -def setVersion(major, minor, patch) { - this.major_version = major.toInteger() - this.minor_version = minor.toInteger() - this.patch_version = patch.toInteger() -} - -def inContainer(Closure body) { - docker.image(this.docker_image).inside("-v ${pwd()}:/paas/go/src/${project_src_path} -v ${System.getProperty('java.io.tmpdir')}:${System.getProperty('java.io.tmpdir')}") { - body() - } -} diff --git a/vendor/github.com/sky-uk/gonsx/LICENSE b/vendor/github.com/sky-uk/gonsx/LICENSE deleted file mode 100644 index ab1d036..0000000 --- a/vendor/github.com/sky-uk/gonsx/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2017, Sky UK Ltd -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sky-uk/gonsx/Makefile b/vendor/github.com/sky-uk/gonsx/Makefile deleted file mode 100644 index f18ca5f..0000000 --- a/vendor/github.com/sky-uk/gonsx/Makefile +++ /dev/null @@ -1,70 +0,0 @@ -PACKAGE = github.com/sky-uk/gonsx -BINARYNAME = gonsx-cli -DATE ?= $(shell date +%FT%T%z) -VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \ - cat $(CURDIR)/VERSION 2> /dev/null || echo v0) - -GOPATH = $(CURDIR)/.gopath~ -BIN = $(GOPATH)/bin -BASE = $(GOPATH)/src/$(PACKAGE) -PKGS = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -v "^$(PACKAGE)/vendor/")) -TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS)) - - -GO = go -GODOC = godoc -GOFMT = gofmt -GLIDE = glide -TIMEOUT = 15 -V = 0 -Q = $(if $(filter 1,$V),,@) -M = $(shell printf "\033[34;1m▶\033[0m") - -.PHONY: all -all: fmt lint test vendor | $(BASE) ; $(info $(M) building executable…) @ ## Build program binary - $Q cd $(BASE)/cli && $(GO) build -tags release -o ../$(BINARYNAME) ./*.go - -$(BASE): ; $(info $(M) setting GOPATH…) - @mkdir -p $(dir $@) - @ln -sf $(CURDIR) $@ - -# Tools - -GOLINT = $(BIN)/golint -$(BIN)/golint: | $(BASE) ; $(info $(M) building golint…) - $Q go get github.com/golang/lint/golint - -TEST_TARGETS := test-default -.PHONY: $(TEST_TARGETS) check test tests -$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%) -$(TEST_TARGETS): test -check test tests: fmt lint vendor | $(BASE) ; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests - $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS) - -.PHONY: lint -lint: vendor | $(BASE) $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint - $Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \ - test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \ - done ; exit $$ret - -.PHONY: fmt -fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files - @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./... | grep -v /vendor/); do \ - $(GOFMT) -l -w $$d/*.go || ret=$$? ; \ - done ; exit $$ret - -.PHONY: clean -clean: ; $(info $(M) cleaning…) @ ## Cleanup everything - @rm -rf $(GOPATH) - @rm -rf bin - @rm -rf test/tests.* test/coverage.* - @rm -rf gonsx-cli - -.PHONY: help -help: - @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ - awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' - -.PHONY: version -version: - @echo $(VERSION) diff --git a/vendor/github.com/sky-uk/gonsx/README.md b/vendor/github.com/sky-uk/gonsx/README.md deleted file mode 100644 index 2a040c6..0000000 --- a/vendor/github.com/sky-uk/gonsx/README.md +++ /dev/null @@ -1,451 +0,0 @@ -# GoNSX - Go library for VMware vSphere NSX API - -## Overview - -This project is a NSXClient library for talking to NSX API. 
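-
-The general pattern, detailed in the Usage section below, is to build a client,
-construct an API object for a resource, and pass it to the client. The host URL,
-credentials and scope ID in this sketch are placeholder values:
-
-```
-nsxclient := gonsx.NewNSXClient("https://nsxhost.com", "admin", "secret", false, false)
-
-api := virtualwire.NewGetAll(scopeID)
-nsxclient.Do(api)
-```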
- -### Features - -| Feature | Create | Read | Update | Delete | -|-------------------------|--------|-------|---------|--------| -| DHCP Relay | Y | Y | Y | Y | -| Edge Interface | Y | Y | Y | Y | -| Security Group | Y | Y | Y | Y | -| Security Policy | Y | Y | Y | Y | -| Security Tag | Y | Y | Y | Y | -| Security Tag Attachment | Y | Y | Y | Y | -| Service | Y | Y | Y | Y | -| Transport Zone | N | Y | N | N | -| Virtual Wire | Y | Y | Y | Y | - -Implementation of CRUD in above features is partial in some cases. - - -### Security Policy features - - -| Function | Attribute | Implemented | Required | -|-------------------------------|-----------------------------------|-------------|-------------| -| Common | Name (String) | Y | Y | -| | Description (String) | Y | Y | -| | Inherit Security Policy (Boolean) | N | N/A | -| | Parent Policy (String) | N | N/A | -| | Weight/Precedence (Integer) | Y | Y | -| Guest Introspection Services | List (String) | N | N/A | -| | Name (String) | N | N/A | -| | Action (String) | N | N/A | -| | Service Type (String) | N | N/A | -| | Service Name (String ) | N | N/A | -| | Service Profile (String) | N | N/A | -| | State (String) | N | N/A | -| | Enforce (Boolean) | N | N/A | -| Firewall rule | Name (String) | Y | Y | -| | Description (String) | Y | Y | -| | Action (String) | Y | Y | -| | Policy Security Groups (String[]) | Y | Y | -| | Negate Source (Boolean) | N | N | -| | Destination (String[]) | Y | Y | -| | Service (String[]) | Y | Y | -| | *State (String) | Y | Y | -| | Log (Boolean) | N | N/A | -| Network Introspection Service | Name (String) | N | N/A | -| | Description (String) | N | N/A | -| | Action (String) | N | N/A | -| | Service Name (String) | N | N/A | -| | Profile (String) | N | N/A | -| | Source (String[]) | N | N/A | -| | NegateSource (Boolean) | N | N/A | -| | Destination (String[]) | N | N/A | -| | NegateDestination (Boolean) | N | N/A | -| | Service (String) | N | N/A | -| | State (String) | N | N/A | -| | Log (Boolean) | N | N/A | - -*State is defaulted to true in all cases and can't be modified. 
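-
-As a sketch of how the implemented firewall rule attributes above surface in the
-library: a rule's name, action, direction, policy security groups and services are
-passed to the helper shown in the Security Policy usage section below (the literal
-argument values here are hypothetical):
-
-```
-securityPolicyToModify.AddOutboundFirewallAction(
-    "allow-web",       // firewall rule name
-    "allow",           // action
-    "outbound",        // direction
-    secGroupObjectIDs, // policy security groups
-    serviceIDs,        // services
-)
-```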
-
-### Security Group features
-
-Apart from the basic CRUD functionality, the Security Group supports the following:
-
-| Component | Functionality Name | GONSX |
-|-------------------------|-------------------------------------------------------------|-------|
-| Security Group | Add Multiple Dynamic Membership Criteria | Y |
-| | OS Name | Y |
-| | Computer Name | Y |
-| | VM Name | Y |
-| | Security Tag | Y |
-| | Entity: this could be | Y |
-| | - Another Security Group | Y |
-| | - Cluster | Y |
-| | - Logical Switch | Y |
-| | - Logical Port Group | Y |
-| | - vApp | Y |
-| | - IP Sets | Y |
-| | - Directory Group | Y |
-| | - MAC Sets | Y |
-| | - Security Tag | Y |
-| | - vNic | Y |
-| | - Virtual Machine | Y |
-| | - Resource Pool | Y |
-| | - Distributed Port Group | Y |
-| Security Group | Add more than one Dynamic Membership criterion | Y |
-| Security Group | Operators for more than one Dynamic Membership criterion | N |
-| Security Group | Static Membership (always add these) | N |
-| | Security Group | N |
-| | Cluster | N |
-| | Logical Switch | N |
-| | Logical Port Group | N |
-| | vApp | N |
-| | Datacenter | N |
-| | IP Sets | N |
-| | MAC Sets | N |
-| | Security Tag | N |
-| | Directory Group | N |
-| | vNic | N |
-| | Virtual Machine | N |
-| | Resource Pool | N |
-| | Distributed Port Group | N |
-| Security Group | Static Exclusions (always exclude these) | N |
-| | Security Group | N |
-| | Cluster | N |
-| | Logical Switch | N |
-| | Logical Port Group | N |
-| | vApp | N |
-| | Datacenter | N |
-| | IP Sets | N |
-| | MAC Sets | N |
-| | Security Tag | N |
-| | Directory Group | N |
-| | vNic | N |
-| | Virtual Machine | N |
-| | Resource Pool | N |
-| | Distributed Port Group | N |
-| Security Group | Static Exclusions (never include) | N |
-| | Security Group | N |
-| | Cluster | N |
-| | Logical Switch | N |
-| | Logical Port Group | N |
-| | vApp | N |
-| | Datacenter | N |
-| | IP Sets | N |
-| | MAC Sets | N |
-| | Security Tag | N |
-| | Directory Group | N |
-| | vNic | N |
-| | Virtual Machine | N |
-| | Resource Pool | N |
-| | Distributed Port Group | N |
-
-
-NOTE:
-
-Security Group - Add Multiple Dynamic Membership Criteria:
-this functionality is only partially implemented; it is not taken into account when creating a group, however it can be done when editing one.
-
-
-## Usage
-### NSXClient
-
-The NSXClient is the class used to send requests to the NSX host and pass through credentials.
-
-Import the following package.
-
-To create an NSX client, run the following code with the correct params.
-
-```
-import (
-    "github.com/sky-uk/gonsx"
-)
-
-nsxclient := gonsx.NewNSXClient(url, username, password, ignoreSSL, debug)
-```
-The params used:
-
-* url: URL of the NSX host
-
-> e.g. https://nsxhost.com
-
-* username: NSX username
-* password: NSX password
-* ignoreSSL: bool on whether to ignore SSL verification (default false)
-* debug: bool on whether to output debug information (default false)
-
-The client is also used to run the api calls once you have created the resource object.
-
-```
-nsxclient.Do(my_resource_obj)
-```
-
-
-### Virtual Wire (Logical Switch)
-
-Virtual Wire resource. This resource will call the Virtual Wires api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/virtualwire
-```
-
-Create:
-
-```
-api := virtualwire.NewCreate(virtualWireCreateSpec, scopeID)
-nsxclient.Do(api)
-```
-
-Read:
-```
-api := virtualwire.NewGetAll(scopeID)
-nsxclient.Do(api)
-resp := api.GetResponse().FilterByName(virtualWireName)
-```
-
-Read single virtualwire:
-```
-api := virtualwire.NewGet(virtualwireID)
-nsxclient.Do(api)
-resp := api.GetResponse()
-```
-
-Update:
-```
-api := virtualwire.NewUpdate(name, desc, virtualwireID)
-nsxclient.Do(api)
-```
-
-Delete:
-```
-api := virtualwire.NewDelete(virtualWireID)
-nsxclient.Do(api)
-```
-
-
-### Edge Interface
-
-Interface resource. This resource will call the interface api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/edgeinterface
-```
-
-Create:
-
-```
-api := edgeinterface.NewCreate(&edgeInterfaces, edgeId)
-nsxclient.Do(api)
-createdEdgeInterfaces := api.GetResponse()
-```
-
-Read:
-```
-api := edgeinterface.NewGet(edgeid, index)
-nsxclient.Do(api)
-edge := api.GetResponse()
-```
-
-Update:
-```
-api := edgeinterface.NewUpdate(edgeid, index, edge)
-nsxclient.Do(api)
-```
-
-Delete:
-```
-api := edgeinterface.NewDelete(edgeid, index)
-nsxclient.Do(api)
-```
-
-### Dhcp Relay
-
-DHCP resource. This resource will call the DHCP relay api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/dhcprelay
-```
-
-The DHCP relay behaves differently in the API: it doesn't have a create, only an update and a delete.
-The delete function will remove the whole relay and all of its information. If you do not wish to do this and only
-want to remove interfaces from the DHCP relay, then you must run an update instead.
-
-Read:
-```
-api := dhcprelay.NewGetAll(edgeId)
-nsxclient.Do(api)
-```
-
-Update:
-```
-api := dhcprelay.NewUpdate(dhcpIpAddress, edgeId, relayAgentslist)
-nsxclient.Do(api)
-```
-
-Delete:
-```
-api := dhcprelay.NewDelete(edgeId)
-nsxclient.Do(api)
-```
-
-### Security Tag
-
-Security tag resource. This resource will call the security tag api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/securitytag
-```
-
-Create:
-```
-api := securitytag.NewCreate(name, desc)
-nsxclient.Do(api)
-```
-
-Read:
-```
-api := securitytag.NewGetAll()
-nsxclient.Do(api)
-```
-
-Update:
-```
-Not yet implemented
-```
-
-Delete:
-```
-api := securitytag.NewDelete(securitytagID)
-nsxclient.Do(api)
-```
-
-Detach:
-```
-api := securitytag.NewDetach(securityTagID, vmID)
-nsxclient.Do(api)
-```
-
-Attach:
-```
-api := securitytag.NewAttach(securityTagID, vmID)
-nsxclient.Do(api)
-```
-
-
-### Service
-
-Service resource. This resource will call the service api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/service
-```
-
-Create:
-```
-api := service.NewCreate(scopeID, name, desc, proto, ports)
-nsxclient.Do(api)
-```
-
-Read:
-```
-api := service.NewGetAll(scopeID)
-nsxclient.Do(api)
-```
-
-Update:
-```
-Not yet implemented
-```
-
-Delete:
-```
-api := service.NewDelete(serviceID)
-nsxclient.Do(api)
-```
-
-
-### Security Group
-
-Security Group resource. This resource will call the security group api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/securitygroup
-```
-
-Create:
-```
-api := securitygroup.NewCreate(scopeID, name, setOperator, criteriaOperator, criteriaKey, criteriaValue, criteria)
-nsxclient.Do(api)
-```
-
-Read:
-```
-api := securitygroup.NewGetAll(scopeID)
-nsxclient.Do(api)
-```
-
-Update:
-```
-Not yet implemented
-```
-
-Delete:
-```
-api := securitygroup.NewDelete(securityGroupID)
-nsxclient.Do(api)
-```
-
-
-### Security Policy
-Security Policy resource. This resource will call the security policy api within NSX.
-Import the following class:
-```
-github.com/sky-uk/gonsx/api/securitypolicy
-```
-
-Create:
-```
-api := securitypolicy.NewCreate(name, precedence, desc, securityGroupsIDs, actions)
-nsxclient.Do(api)
-```
-
-Read:
-```
-api := securitypolicy.NewGetAll()
-nsxclient.Do(api)
-```
-
-Update:
-```
-Not yet implemented
-```
-
-Delete:
-```
-api := securitypolicy.NewDelete(securityPolicyID, force)
-nsxclient.Do(api)
-```
-
-AddOutboundFirewall:
-```
-securityPolicyToModify := getAllAPI.GetResponse().FilterByName(securityPolicyName)
-
-// we will use a helper function to add a firewall rule.
-securityPolicyToModify.AddOutboundFirewallAction(
-    firewallName,
-    action,
-    direction,
-    secGroupObjectIDs,
-    serviceIDs,
-)
-
-updateAPI := securitypolicy.NewUpdate(securityPolicyToModify.ObjectID, securityPolicyToModify)
-nsxclient.Do(updateAPI)
-```
-
-RemoveFirewall:
-```
-securityPolicyToModify := getAllAPI.GetResponse().FilterByName(securityPolicyName)
-
-securityPolicyToModify.RemoveFirewallActionByName(firewallName)
-
-updateAPI := securitypolicy.NewUpdate(securityPolicyToModify.ObjectID, securityPolicyToModify)
-nsxclient.Do(updateAPI)
-```
diff --git a/vendor/github.com/sky-uk/gonsx/VERSION b/vendor/github.com/sky-uk/gonsx/VERSION
deleted file mode 100644
index 50ac577..0000000
--- a/vendor/github.com/sky-uk/gonsx/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.3.15
\ No newline at end of file
diff --git a/vendor/github.com/sky-uk/gonsx/api/base_api.go b/vendor/github.com/sky-uk/gonsx/api/base_api.go
deleted file mode 100644
index 441cb1e..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/base_api.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package api
-
-// BaseAPI - Base API struct.
-type BaseAPI struct {
-	method         string
-	endpoint       string
-	requestObject  interface{}
-	responseObject interface{}
-
-	statusCode  int
-	rawResponse []byte
-	err         error
-}
-
-// NewBaseAPI - Returns a new object of the BaseAPI.
-func NewBaseAPI(method string, endpoint string, requestObject interface{}, responseObject interface{}) *BaseAPI {
-	return &BaseAPI{method, endpoint, requestObject, responseObject, 0, nil, nil}
-}
-
-// RequestObject - Returns the request object of the BaseAPI.
-func (b *BaseAPI) RequestObject() interface{} {
-	return b.requestObject
-}
-
-// ResponseObject - Returns the ResponseObject interface.
-func (b *BaseAPI) ResponseObject() interface{} {
-	return b.responseObject
-}
-
-// Method - Returns the Method string, i.e. GET, PUT, POST.
-func (b *BaseAPI) Method() string {
-	return b.method
-}
-
-// Endpoint - Returns the Endpoint url string.
-func (b *BaseAPI) Endpoint() string {
-	return b.endpoint
-}
-
-// StatusCode - Returns the status code of the api.
-func (b *BaseAPI) StatusCode() int {
-	return b.statusCode
-}
-
-// RawResponse - Returns the rawResponse object as byte type.
-func (b *BaseAPI) RawResponse() []byte {
-	return b.rawResponse
-}
-
-// Error - Returns the err of the api.
diff --git a/vendor/github.com/sky-uk/gonsx/VERSION b/vendor/github.com/sky-uk/gonsx/VERSION
deleted file mode 100644
index 50ac577..0000000
--- a/vendor/github.com/sky-uk/gonsx/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.3.15
\ No newline at end of file
diff --git a/vendor/github.com/sky-uk/gonsx/api/base_api.go b/vendor/github.com/sky-uk/gonsx/api/base_api.go
deleted file mode 100644
index 441cb1e..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/base_api.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package api
-
-// BaseAPI - Base API struct.
-type BaseAPI struct {
-	method         string
-	endpoint       string
-	requestObject  interface{}
-	responseObject interface{}
-
-	statusCode  int
-	rawResponse []byte
-	err         error
-}
-
-// NewBaseAPI - Returns a new object of the BaseAPI.
-func NewBaseAPI(method string, endpoint string, requestObject interface{}, responseObject interface{}) *BaseAPI {
-	return &BaseAPI{method, endpoint, requestObject, responseObject, 0, nil, nil}
-}
-
-// RequestObject - Returns the request object of the BaseAPI.
-func (b *BaseAPI) RequestObject() interface{} {
-	return b.requestObject
-}
-
-// ResponseObject - Returns the ResponseObject interface.
-func (b *BaseAPI) ResponseObject() interface{} {
-	return b.responseObject
-}
-
-// Method - Returns the Method string, i.e. GET, PUT, POST.
-func (b *BaseAPI) Method() string {
-	return b.method
-}
-
-// Endpoint - Returns the Endpoint url string.
-func (b *BaseAPI) Endpoint() string {
-	return b.endpoint
-}
-
-// StatusCode - Returns the status code of the api.
-func (b *BaseAPI) StatusCode() int {
-	return b.statusCode
-}
-
-// RawResponse - Returns the rawResponse object as byte type.
-func (b *BaseAPI) RawResponse() []byte {
-	return b.rawResponse
-}
-
-// Error - Returns the err of the api.
-func (b *BaseAPI) Error() error {
-	return b.err
-}
-
-// SetStatusCode - Sets the statusCode on the api object.
-func (b *BaseAPI) SetStatusCode(statusCode int) {
-	b.statusCode = statusCode
-}
-
-// SetRawResponse - Sets the rawResponse on the api object.
-func (b *BaseAPI) SetRawResponse(rawResponse []byte) {
-	b.rawResponse = rawResponse
-}
-
-// SetError - Sets the err on the api object.
-func (b *BaseAPI) SetError(err error) {
-	b.err = err
-}
-
-// SetResponseObject - Sets the responseObject on the api.
-func (b *BaseAPI) SetResponseObject(res interface{}) {
-	b.responseObject = res
-}
diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/create_dhcprelay.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/create_dhcprelay.go
deleted file mode 100644
index 451a812..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/create_dhcprelay.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package dhcprelay
-
-import (
-	"github.com/sky-uk/gonsx/api"
-	"net/http"
-)
-
-// CreateDhcpRelayAPI ...
-type CreateDhcpRelayAPI struct {
-	*api.BaseAPI
-}
-
-// NewCreate creates a new object of CreateDhcpRelayAPI.
-func NewCreate(edgeID string, dhcpRelay DhcpRelay) *CreateDhcpRelayAPI {
-	this := new(CreateDhcpRelayAPI)
-	requestPayload := dhcpRelay
-	this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/4.0/edges/"+edgeID+"/dhcp/config/relay", requestPayload, new(string))
-	return this
-}
-
-// GetResponse returns the ResponseObject from CreateDhcpRelayAPI.
-func (createAPI CreateDhcpRelayAPI) GetResponse() string {
-	return createAPI.ResponseObject().(string)
-}
diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/delete_dhcprelay.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/delete_dhcprelay.go
deleted file mode 100644
index 20c10c8..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/delete_dhcprelay.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package dhcprelay
-
-import (
-	"github.com/sky-uk/gonsx/api"
-	"net/http"
-)
-
-// DeleteDhcpRelayAPI - struct
-type DeleteDhcpRelayAPI struct {
-	*api.BaseAPI
-}
-
-// NewDelete - Generates a new DeleteDhcpRelayAPI object.
-func NewDelete(edgeID string) *DeleteDhcpRelayAPI {
-	this := new(DeleteDhcpRelayAPI)
-	this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/4.0/edges/"+edgeID+"/dhcp/config/relay", nil, nil)
-	return this
-}
diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types.go
deleted file mode 100644
index 970db9f..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package dhcprelay
-
-import "encoding/xml"
-
-// DhcpRelay - top level struct.
-type DhcpRelay struct {
-	XMLName     xml.Name     `xml:"relay"`
-	RelayServer RelayServer  `xml:"relayServer"`
-	RelayAgents []RelayAgent `xml:"relayAgents>relayAgent"`
-}
-
-// RelayServer - relayserver within DhcpRelay object.
-type RelayServer struct {
-	IPSets     []string `xml:"groupingObjectId"`
-	IPAddress  []string `xml:"ipAddress"`
-	DomainName []string `xml:"fqdn"`
-}
-
-// RelayAgent - relayagent within DhcpRelay object.
-type RelayAgent struct { - XMLName xml.Name `xml:"relayAgent"` - VnicIndex string `xml:"vnicIndex"` - GiAddress string `xml:"giAddress"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types_functions.go deleted file mode 100644 index 01f83f7..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/dhcprelay_types_functions.go +++ /dev/null @@ -1,56 +0,0 @@ -package dhcprelay - -import "fmt" - -func (s RelayServer) String() string { - return fmt.Sprintf("DhcpRelayServer ipAddress:%s.", s.IPAddress) -} - -func (d RelayAgent) String() string { - return fmt.Sprintf("DhcpRelayAgent VnicIndex:%s, GiAddress:%s.", d.VnicIndex, d.GiAddress) -} - -// FilterByIPAddress - Filters the DhcpRelay->RelayAgents with provided IP Address. -func (v DhcpRelay) FilterByIPAddress(ipAddress string) *RelayAgent { - var relayAgentFound RelayAgent - for _, relayAgent := range v.RelayAgents { - if relayAgent.GiAddress == ipAddress { - relayAgentFound = relayAgent - break - } - } - return &relayAgentFound -} - -// FilterByVnicIndex - Filters the DhcpRelay->RelayAgents with provided vnicIndex. -func (v DhcpRelay) FilterByVnicIndex(vnicIndex string) *RelayAgent { - var relayAgentFound RelayAgent - for _, relayAgent := range v.RelayAgents { - if relayAgent.VnicIndex == vnicIndex { - relayAgentFound = relayAgent - break - } - } - return &relayAgentFound -} - -// RemoveByVnicIndex - Removes the relayagent from DhcpRelay->RelayAgents which relates to provided vnicIndex. -func (v DhcpRelay) RemoveByVnicIndex(vnicIndex string) *DhcpRelay { - for idx, relayAgent := range v.RelayAgents { - if relayAgent.VnicIndex == vnicIndex { - v.RelayAgents = append(v.RelayAgents[:idx], v.RelayAgents[idx+1:]...) - break - } - } - return &v -} - -// CheckByVnicIndex - Returns true/false depending on if vnicIndex exists in DhcpRelay->RelayAgents list. -func (v DhcpRelay) CheckByVnicIndex(vnicIndex string) bool { - for _, relayAgent := range v.RelayAgents { - if relayAgent.VnicIndex == vnicIndex { - return true - } - } - return false -} diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/get_all_dhcprelay.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/get_all_dhcprelay.go deleted file mode 100644 index f154551..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/get_all_dhcprelay.go +++ /dev/null @@ -1,23 +0,0 @@ -package dhcprelay - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllDhcpRelaysAPI - struct -type GetAllDhcpRelaysAPI struct { - *api.BaseAPI -} - -// NewGetAll - returns GetAll api object of GetAllDhcpRelaysAPI type. -func NewGetAll(edgeID string) *GetAllDhcpRelaysAPI { - this := new(GetAllDhcpRelaysAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/4.0/edges/"+edgeID+"/dhcp/config/relay", nil, new(DhcpRelay)) - return this -} - -// GetResponse - Returns ResponseObject from GetAllDhcpRelaysAPI of DhcpRelay type. -func (getAllAPI GetAllDhcpRelaysAPI) GetResponse() *DhcpRelay { - return getAllAPI.ResponseObject().(*DhcpRelay) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/update_dhcprelay.go b/vendor/github.com/sky-uk/gonsx/api/dhcprelay/update_dhcprelay.go deleted file mode 100644 index 00fbddb..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/dhcprelay/update_dhcprelay.go +++ /dev/null @@ -1,24 +0,0 @@ -package dhcprelay - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateDhcpRelayAPI ... 
-type UpdateDhcpRelayAPI struct { - *api.BaseAPI -} - -// NewUpdate creates a new object of UpdateDhcpRelayAPI -func NewUpdate(edgeID string, dhcpRelay DhcpRelay) *UpdateDhcpRelayAPI { - this := new(UpdateDhcpRelayAPI) - requestPayload := dhcpRelay - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/4.0/edges/"+edgeID+"/dhcp/config/relay", requestPayload, new(string)) - return this -} - -// GetResponse returns the ResponseObject from UpdateDhcpRelayAPI -func (updateAPI UpdateDhcpRelayAPI) GetResponse() string { - return updateAPI.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/create_edgeinterface.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/create_edgeinterface.go deleted file mode 100644 index b6e0567..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/create_edgeinterface.go +++ /dev/null @@ -1,25 +0,0 @@ -package edgeinterface - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateEdgeInterfaceAPI struct -type CreateEdgeInterfaceAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreateEdgeInterfaceAPI -func NewCreate(edgeInterfaceList *EdgeInterfaces, edgeID string) *CreateEdgeInterfaceAPI { - - this := new(CreateEdgeInterfaceAPI) - - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/4.0/edges/"+edgeID+"/interfaces/?action=patch", edgeInterfaceList, new(EdgeInterfaces)) - return this -} - -// GetResponse returns the ResponseObject. -func (createAPI CreateEdgeInterfaceAPI) GetResponse() *EdgeInterfaces { - return createAPI.ResponseObject().(*EdgeInterfaces) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/delete_edgeinterface.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/delete_edgeinterface.go deleted file mode 100644 index 4b8d014..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/delete_edgeinterface.go +++ /dev/null @@ -1,19 +0,0 @@ -package edgeinterface - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" - "strconv" -) - -// DeleteEdgeInterfaceAPI struct -type DeleteEdgeInterfaceAPI struct { - *api.BaseAPI -} - -// NewDelete returns a new delete method object of DeleteEdgeInterfaceAPI -func NewDelete(edgeID string, index int) *DeleteEdgeInterfaceAPI { - this := new(DeleteEdgeInterfaceAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/4.0/edges/"+edgeID+"/interfaces/"+strconv.Itoa(index), nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types.go deleted file mode 100644 index 22b6d3e..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types.go +++ /dev/null @@ -1,34 +0,0 @@ -package edgeinterface - -import "encoding/xml" - -// EdgeInterfaces top level list object -type EdgeInterfaces struct { - XMLName xml.Name `xml:"interfaces"` - Interfaces []EdgeInterface `xml:"interface"` -} - -// EdgeInterface object within EdgeInterfaces list. -type EdgeInterface struct { - XMLName xml.Name `xml:"interface"` - Name string `xml:"name"` - Label string `xml:"label,omitempty"` - Mtu int `xml:"mtu"` - Type string `xml:"type"` - IsConnected bool `xml:"isConnected"` - ConnectedToID string `xml:"connectedToId"` - AddressGroups AddressGroups `xml:"addressGroups"` - Index int `xml:"index,omitempty"` -} - -// AddressGroups within EdgeInterface. -type AddressGroups struct { - AddressGroups []AddressGroup `xml:"addressGroup"` -} - -// AddressGroup object within AddressGroup list. 
-type AddressGroup struct { - XMLName xml.Name `xml:"addressGroup"` - PrimaryAddress string `xml:"primaryAddress"` - SubnetMask string `xml:"subnetMask"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types_functions.go deleted file mode 100644 index 203fce0..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/edgeinterface_types_functions.go +++ /dev/null @@ -1,24 +0,0 @@ -package edgeinterface - -import "fmt" - -func (s EdgeInterfaces) String() string { - return fmt.Sprintf("%v", s.Interfaces) -} - -//func (s EdgeInterface) String() string { -// return fmt.Sprintf("index: %s, name: %s", s.Index, s.Name) -//} - -// FilterByName filters EdgeInterfaces list by given name of interface and returns -// the found interface object. -func (s EdgeInterfaces) FilterByName(name string) *EdgeInterface { - var edgeInterfaceFound EdgeInterface - for _, edgeInterface := range s.Interfaces { - if edgeInterface.Name == name { - edgeInterfaceFound = edgeInterface - break - } - } - return &edgeInterfaceFound -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_all_edgeinterfaces.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_all_edgeinterfaces.go deleted file mode 100644 index 17c4cb3..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_all_edgeinterfaces.go +++ /dev/null @@ -1,23 +0,0 @@ -package edgeinterface - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllEdgeInterfacesAPI base object. -type GetAllEdgeInterfacesAPI struct { - *api.BaseAPI -} - -// NewGetAll returns the api object of GetAllEdgeInterfacesAPI -func NewGetAll(edgeID string) *GetAllEdgeInterfacesAPI { - this := new(GetAllEdgeInterfacesAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/4.0/edges/"+edgeID+"/interfaces", nil, new(EdgeInterfaces)) - return this -} - -// GetResponse returns ResponseObject of GetAllEdgeInterfacesAPI -func (getAllAPI GetAllEdgeInterfacesAPI) GetResponse() *EdgeInterfaces { - return getAllAPI.ResponseObject().(*EdgeInterfaces) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_edgeinterface.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_edgeinterface.go deleted file mode 100644 index 0772b7e..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/get_edgeinterface.go +++ /dev/null @@ -1,24 +0,0 @@ -package edgeinterface - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" - "strconv" -) - -// GetEdgeInterfaceAPI base object. 
-type GetEdgeInterfaceAPI struct { - *api.BaseAPI -} - -// NewGet returns the api object of GetEdgeInterfaceAPI -func NewGet(edgeID string, index int) *GetEdgeInterfaceAPI { - this := new(GetEdgeInterfaceAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/4.0/edges/"+edgeID+"/interfaces/"+strconv.Itoa(index), nil, new(EdgeInterface)) - return this -} - -// GetResponse returns ResponseObject of GetEdgeInterfaceAPI -func (getAPI GetEdgeInterfaceAPI) GetResponse() EdgeInterface { - return *getAPI.ResponseObject().(*EdgeInterface) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/update_edgeinterface.go b/vendor/github.com/sky-uk/gonsx/api/edgeinterface/update_edgeinterface.go deleted file mode 100644 index a4efa23..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/edgeinterface/update_edgeinterface.go +++ /dev/null @@ -1,19 +0,0 @@ -package edgeinterface - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" - "strconv" -) - -// UpdateEdgeInterfaceAPI struct -type UpdateEdgeInterfaceAPI struct { - *api.BaseAPI -} - -// NewUpdate returns a new delete method object of UpdateEdgeInterfaceAPI -func NewUpdate(edgeID string, index int, edge EdgeInterface) *UpdateEdgeInterfaceAPI { - this := new(UpdateEdgeInterfaceAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/4.0/edges/"+edgeID+"/interfaces/"+strconv.Itoa(index), edge, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/create_firewallexclusion.go b/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/create_firewallexclusion.go deleted file mode 100644 index a30a0db..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/create_firewallexclusion.go +++ /dev/null @@ -1,23 +0,0 @@ -package firewallexclusion - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateFirewallExclusionAPI base object. -type CreateFirewallExclusionAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreateFirewallExclusionAPI. -func NewCreate(moid string) *CreateFirewallExclusionAPI { - this := new(CreateFirewallExclusionAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/2.1/app/excludelist/"+moid, nil, nil) - return this -} - -// GetResponse returns ResponseObject of CreateFirewallExclusionAPI. -func (ga CreateFirewallExclusionAPI) GetResponse() string { - return ga.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/delete_firewallexclusion.go b/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/delete_firewallexclusion.go deleted file mode 100644 index 4486d46..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/delete_firewallexclusion.go +++ /dev/null @@ -1,23 +0,0 @@ -package firewallexclusion - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteFirewallExclusionAPI base object. -type DeleteFirewallExclusionAPI struct { - *api.BaseAPI -} - -// NewDelete returns a new object of DeleteFirewallExclusionAPI. -func NewDelete(moid string) *DeleteFirewallExclusionAPI { - this := new(DeleteFirewallExclusionAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.1/app/excludelist/"+moid, nil, nil) - return this -} - -// GetResponse returns ResponseObject of DeleteFirewallExclusionAPI. 
-func (ga DeleteFirewallExclusionAPI) GetResponse() string { - return ga.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types.go b/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types.go deleted file mode 100644 index 930fe8c..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types.go +++ /dev/null @@ -1,16 +0,0 @@ -package firewallexclusion - -import "encoding/xml" - -// FirewallExclusions - top level xml element -type FirewallExclusions struct { - XMLName xml.Name `xml:"VshieldAppConfiguration"` - Members []Member `xml:"excludeListConfiguration>excludeMember>member"` -} - -// Member object -type Member struct { - XMLName xml.Name `xml:"member"` - MOID string `xml:"objectId"` - Name string `xml:"name"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types_functions.go deleted file mode 100644 index a9072de..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/firewallexclusion_types_functions.go +++ /dev/null @@ -1,35 +0,0 @@ -package firewallexclusion - -import ( - "fmt" -) - -func (a FirewallExclusions) String() string { - return fmt.Sprintf("FirewallExclusions object, contains member objects.") -} - -func (a Member) String() string { - return fmt.Sprintf("MOID: %-20s name: %-20s.", a.MOID, a.Name) -} - -// FilterByMOID returns a single member object if it matches the moid in FirewallExclusions -func (a FirewallExclusions) FilterByMOID(moid string) *Member { - var memberFound Member - for _, member := range a.Members { - if member.MOID == moid { - memberFound = member - break - } - } - return &memberFound -} - -// CheckByMOID - Returns true or false depending if moid is in FirewallExclusions -func (a FirewallExclusions) CheckByMOID(moid string) bool { - for _, member := range a.Members { - if member.MOID == moid { - return true - } - } - return false -} diff --git a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/get_all_firewallexclusion.go b/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/get_all_firewallexclusion.go deleted file mode 100644 index 180c003..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/firewallexclusion/get_all_firewallexclusion.go +++ /dev/null @@ -1,23 +0,0 @@ -package firewallexclusion - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllFirewallExclusionAPI base object. -type GetAllFirewallExclusionAPI struct { - *api.BaseAPI -} - -// NewGetAll returns a new object of GetAllFirewallExclusionAPI. -func NewGetAll() *GetAllFirewallExclusionAPI { - this := new(GetAllFirewallExclusionAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.1/app/excludelist", nil, new(FirewallExclusions)) - return this -} - -// GetResponse returns ResponseObject of GetAllIpSetAPI. -func (ga GetAllFirewallExclusionAPI) GetResponse() *FirewallExclusions { - return ga.ResponseObject().(*FirewallExclusions) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/nsx_api.go b/vendor/github.com/sky-uk/gonsx/api/nsx_api.go deleted file mode 100644 index ddfc5ac..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/nsx_api.go +++ /dev/null @@ -1,16 +0,0 @@ -package api - -// NSXApi object. 
-type NSXApi interface { - Method() string - Endpoint() string - RequestObject() interface{} - ResponseObject() interface{} - StatusCode() int - RawResponse() []byte - Error() error - - SetResponseObject(interface{}) - SetStatusCode(int) - SetRawResponse([]byte) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/create_securitygroup.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/create_securitygroup.go deleted file mode 100644 index 2db074b..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/create_securitygroup.go +++ /dev/null @@ -1,27 +0,0 @@ -package securitygroup - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateSecurityGroupAPI api object -type CreateSecurityGroupAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreateSecurityGroupAPI. -func NewCreate(scopeID, securityGroupName string, dynamicMemberDefinition *DynamicMemberDefinition) *CreateSecurityGroupAPI { - this := new(CreateSecurityGroupAPI) - requestPayload := new(SecurityGroup) - requestPayload.Name = securityGroupName - - requestPayload.DynamicMemberDefinition = dynamicMemberDefinition - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/2.0/services/securitygroup/bulk/"+scopeID, requestPayload, new(string)) - return this -} - -// GetResponse returns a ResponseObject of CreateSecurityGroupAPI. -func (ca CreateSecurityGroupAPI) GetResponse() string { - return ca.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/delete_securitygroup.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/delete_securitygroup.go deleted file mode 100644 index 5d9f624..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/delete_securitygroup.go +++ /dev/null @@ -1,18 +0,0 @@ -package securitygroup - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteSecurityGroupAPI base object. -type DeleteSecurityGroupAPI struct { - *api.BaseAPI -} - -// NewDelete returns a new object of DeleteSecurityGroupAPI. -func NewDelete(securityGroupID string) *DeleteSecurityGroupAPI { - this := new(DeleteSecurityGroupAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.0/services/securitygroup/"+securityGroupID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/get_all_securitygroup.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/get_all_securitygroup.go deleted file mode 100644 index f64e523..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/get_all_securitygroup.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitygroup - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllSecurityGroupAPI base object. -type GetAllSecurityGroupAPI struct { - *api.BaseAPI -} - -// NewGetAll returns a new object of GetAllSecurityGroupAPI. -func NewGetAll(scopeID string) *GetAllSecurityGroupAPI { - this := new(GetAllSecurityGroupAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/securitygroup/scope/"+scopeID, nil, new(List)) - return this -} - -// GetResponse returns ResponseObject of GetAllSecurityGroupAPI. 
-func (ga GetAllSecurityGroupAPI) GetResponse() *List { - return ga.ResponseObject().(*List) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types.go deleted file mode 100644 index c648c92..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types.go +++ /dev/null @@ -1,41 +0,0 @@ -package securitygroup - -import "encoding/xml" - -// List - top level element -type List struct { - XMLName xml.Name `xml:"list"` - SecurityGroups []SecurityGroup `xml:"securitygroup"` -} - -// SecurityGroup - element of -type SecurityGroup struct { - XMLName xml.Name `xml:"securitygroup"` - ObjectID string `xml:"objectId,omitempty"` - ObjectTypeName string `xml:"objectTypeName,omitempty"` - Revision string `xml:"revision,omitempty"` - Type string `xml:"type,omitempty>typeName,omitempty"` - Name string `xml:"name"` - InheritanceAllowed bool `xml:"inheritanceAllowed,omitempty"` - DynamicMemberDefinition *DynamicMemberDefinition `xml:"dynamicMemberDefinition,omitempty"` -} - -// DynamicMemberDefinition - element of -type DynamicMemberDefinition struct { - DynamicSet []DynamicSet `xml:"dynamicSet,omitempty"` -} - -// DynamicSet - element of -type DynamicSet struct { - Operator string `xml:"operator,omitempty"` - DynamicCriteria []DynamicCriteria `xml:"dynamicCriteria,omitempty"` -} - -// DynamicCriteria - element of -type DynamicCriteria struct { - Operator string `xml:"operator"` - Key string `xml:"key"` - Criteria string `xml:"criteria"` - Value string `xml:"value"` - IsValid bool `xml:"isValid,omitempty"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types_functions.go deleted file mode 100644 index 8b074a8..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/securitygroup_types_functions.go +++ /dev/null @@ -1,65 +0,0 @@ -package securitygroup - -import ( - "fmt" -) - -func (sgl List) String() string { - return fmt.Sprintf("SecurityGroupList object, contains SecurityGroup objects.") -} - -func (sg SecurityGroup) String() string { - return fmt.Sprintf("objectId: %-20s name: %-20s.", sg.ObjectID, sg.Name) -} - -// FilterByName returns a single securitygroup object if it matches the name in SecurityGroup -func (sgl List) FilterByName(name string) *SecurityGroup { - var securityGroupFound SecurityGroup - for _, sg := range sgl.SecurityGroups { - if sg.Name == name { - securityGroupFound = sg - break - } - } - return &securityGroupFound -} - -// AddDynamicMemberDefinitionSet adds new DynamicSet to DynamicMemberDefinition of SecurityGroup object. -func (sg *SecurityGroup) AddDynamicMemberDefinitionSet(operator string, dynamicCriteriaList []DynamicCriteria) { - newDynamicSet := DynamicSet{Operator: operator, DynamicCriteria: dynamicCriteriaList} - if sg.DynamicMemberDefinition != nil && len(sg.DynamicMemberDefinition.DynamicSet) >= 1 { - sg.DynamicMemberDefinition.DynamicSet = append(sg.DynamicMemberDefinition.DynamicSet, newDynamicSet) - } else { - dynamicSetList := []DynamicSet{newDynamicSet} - sg.DynamicMemberDefinition = &DynamicMemberDefinition{DynamicSet: dynamicSetList} - } - -} - -// NewDynamicCriteria creates a new dynamic criteria and returns it. 
-func NewDynamicCriteria(operator, key, value, criteria string) *DynamicCriteria { - newDynamicCriteria := DynamicCriteria{ - Operator: operator, - Key: key, - Value: value, - Criteria: criteria, - } - return &newDynamicCriteria -} - -// NewDynamicSet returns a list of dynamic criteria and their operators. -func NewDynamicSet(operator string, dynamicCriteriaList []DynamicCriteria) *DynamicSet { - newDynamicSet := DynamicSet{ - Operator: operator, - DynamicCriteria: dynamicCriteriaList, - } - return &newDynamicSet -} - -// NewDynamicMemberDefinition returns a list of dynamic sets -func NewDynamicMemberDefinition(dynamicSetList []DynamicSet) *DynamicMemberDefinition { - newDynamicMemberDefinition := DynamicMemberDefinition{ - DynamicSet: dynamicSetList, - } - return &newDynamicMemberDefinition -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitygroup/update_securitygroup.go b/vendor/github.com/sky-uk/gonsx/api/securitygroup/update_securitygroup.go deleted file mode 100644 index ed2f321..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitygroup/update_securitygroup.go +++ /dev/null @@ -1,24 +0,0 @@ -package securitygroup - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateSecurityGroupAPI object -type UpdateSecurityGroupAPI struct { - *api.BaseAPI -} - -// NewUpdate creates a new object of UpdateSecurityGroupAPI -func NewUpdate(securityGroupID string, securityGroupPayload *SecurityGroup) *UpdateSecurityGroupAPI { - this := new(UpdateSecurityGroupAPI) - endpointURL := "/api/2.0/services/securitygroup/bulk/" + securityGroupID - this.BaseAPI = api.NewBaseAPI(http.MethodPut, endpointURL, securityGroupPayload, new(SecurityGroup)) - return this -} - -// GetResponse returns the ResponseObject from UpdateServiceAPI -func (updateAPI UpdateSecurityGroupAPI) GetResponse() *SecurityGroup { - return updateAPI.ResponseObject().(*SecurityGroup) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/create_securitypolicy.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/create_securitypolicy.go deleted file mode 100644 index 1bf3a65..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/create_securitypolicy.go +++ /dev/null @@ -1,36 +0,0 @@ -package securitypolicy - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateSecurityPolicyAPI api object -type CreateSecurityPolicyAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreatePolicyAPI. -func NewCreate(name, precedence, description string, securityGroups []string, actions []Action) *CreateSecurityPolicyAPI { - this := new(CreateSecurityPolicyAPI) - requestPayload := new(SecurityPolicy) - requestPayload.Name = name - requestPayload.Precedence = precedence - requestPayload.Description = description - requestPayload.SecurityGroupBinding = []SecurityGroup{} - - var securityGroupBindingList = []SecurityGroup{} - for _, secGroupID := range securityGroups { - securityGroupBinding := SecurityGroup{ObjectID: secGroupID} - securityGroupBindingList = append(securityGroupBindingList, securityGroupBinding) - } - requestPayload.SecurityGroupBinding = securityGroupBindingList - requestPayload.ActionsByCategory = ActionsByCategory{Actions: actions} - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/2.0/services/policy/securitypolicy", requestPayload, new(string)) - return this -} - -// GetResponse returns a ResponseObject of CreateServiceAPI. 
-func (ca CreateSecurityPolicyAPI) GetResponse() string { - return ca.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/delete_securitypolicy.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/delete_securitypolicy.go deleted file mode 100644 index 555816d..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/delete_securitypolicy.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitypolicy - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteSecurityPolicyAPI - struct -type DeleteSecurityPolicyAPI struct { - *api.BaseAPI -} - -// NewDelete - Generates a new DeleteSecurityPolicyAPI object. -func NewDelete(securityPolicyID string, force bool) *DeleteSecurityPolicyAPI { - this := new(DeleteSecurityPolicyAPI) - url := "/api/2.0/services/policy/securitypolicy/" + securityPolicyID + "?force=false" - - if force { - url = "/api/2.0/services/policy/securitypolicy/" + securityPolicyID + "?force=true" - } - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, url, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_all_securitypolicy.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_all_securitypolicy.go deleted file mode 100644 index b5ae99d..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_all_securitypolicy.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitypolicy - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllSecurityPoliciesAPI - struct -type GetAllSecurityPoliciesAPI struct { - *api.BaseAPI -} - -// NewGetAll - returns GetAll api object of GetAllSecurityPoliciesAPI type. -func NewGetAll() *GetAllSecurityPoliciesAPI { - this := new(GetAllSecurityPoliciesAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/policy/securitypolicy/all", nil, new(SecurityPolicies)) - return this -} - -// GetResponse - Returns ResponseObject from GetAllSecurityPoliciesAPI. -func (getAllAPI GetAllSecurityPoliciesAPI) GetResponse() *SecurityPolicies { - return getAllAPI.ResponseObject().(*SecurityPolicies) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_securitypolicy.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_securitypolicy.go deleted file mode 100644 index 64ba169..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/get_securitypolicy.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitypolicy - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetSecurityPolicyAPI base object. -type GetSecurityPolicyAPI struct { - *api.BaseAPI -} - -// NewGet returns new object of GetSecurityPolicyAPI. -func NewGet(securityPolicyID string) *GetSecurityPolicyAPI { - this := new(GetSecurityPolicyAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/policy/securitypolicy/"+securityPolicyID, nil, new(SecurityPolicy)) - return this -} - -// GetResponse returns ResponseObject of GetSecurityPolicyAPI. 
-func (ga GetSecurityPolicyAPI) GetResponse() *SecurityPolicy { - return ga.ResponseObject().(*SecurityPolicy) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types.go deleted file mode 100644 index 7dbc7a7..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types.go +++ /dev/null @@ -1,69 +0,0 @@ -package securitypolicy - -import "encoding/xml" - -// SecurityPolicies top level struct -type SecurityPolicies struct { - SecurityPolicies []SecurityPolicy `xml:"securityPolicy"` -} - -// SecurityPolicy object struct -type SecurityPolicy struct { - XMLName xml.Name `xml:"securityPolicy"` - ObjectID string `xml:"objectId,omitempty"` - ObjectTypeName string `xml:"objectTypeName,omitempty"` - VsmUUID string `xml:"vsmUuid,omitempty"` - NodeID string `xml:"nodeId,omitempty"` - Revision int `xml:"revision,omitempty"` - TypeName string `xml:"type,omitempty>typeName,omitempty"` - Name string `xml:"name,omitempty"` - Description string `xml:"description,omitempty"` - Precedence string `xml:"precedence"` - IsUniversal bool `xml:"isUniversal,omitempty"` - InheritanceAllowed bool `xml:"inheritanceAllowed,omitempty"` - ActionsByCategory ActionsByCategory `xml:"actionsByCategory,omitempty"` - SecurityGroupBinding []SecurityGroup `xml:"securityGroupBinding,omitempty"` -} - -// SecurityGroup object struct -type SecurityGroup struct { - ObjectID string `xml:"objectId,omitempty"` -} - -// ActionsByCategory element of SecurityPolicy. -type ActionsByCategory struct { - XMLName xml.Name `xml:"actionsByCategory"` - Category string `xml:"category,omitempty"` - Actions []Action `xml:"action,omitempty"` -} - -// Action element of ActionsByCategory list. 
-type Action struct {
-	XMLName                xml.Name        `xml:"action"`
-	Class                  string          `xml:"class,attr"`
-	ObjectID               string          `xml:"objectId,omitempty"`
-	ObjectTypeName         string          `xml:"objectTypeName,omitempty"`
-	VsmUUID                string          `xml:"vsmUuid,omitempty"`
-	NodeID                 string          `xml:"nodeId,omitempty"`
-	Revision               int             `xml:"revision,omitempty"`
-	TypeName               string          `xml:"type,omitempty>typeName,omitempty"`
-	Name                   string          `xml:"name,omitempty"`
-	Action                 string          `xml:"action"`
-	Category               string          `xml:"category"`
-	Direction              string          `xml:"direction"`
-	IsEnabled              bool            `xml:"isEnabled,omitempty"`
-	SecondarySecurityGroup []SecurityGroup `xml:"secondarySecurityGroup,omitempty"`
-	Applications           *Applications   `xml:"applications,omitempty"`
-}
-
-// Applications object struct
-type Applications struct {
-	Applications []Application `xml:"application,omitempty"`
-}
-
-// Application element of the Applications list
-type Application struct {
-	//XMLName  xml.Name `xml:"application,omitempty"`
-	ObjectID string `xml:"objectId,omitempty"`
-	Name     string `xml:"name,omitempty"`
-}
diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types_functions.go
deleted file mode 100644
index e3f9c32..0000000
--- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/securitypolicy_types_functions.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package securitypolicy
-
-import (
-	"encoding/xml"
-	"errors"
-	"fmt"
-)
-
-func (sp SecurityPolicy) String() string {
-	return fmt.Sprintf("SecurityPolicy with objectId: %s", sp.ObjectID)
-}
-
-// MarshalToXML converts the object into XML.
-func (sp SecurityPolicy) MarshalToXML() string {
-	xmlBytes, _ := xml.Marshal(sp)
-	return string(xmlBytes)
-}
-
-// AddSecurityGroupBinding - Adds the security group to the SecurityGroupBinding list if it doesn't already exist.
-func (sp *SecurityPolicy) AddSecurityGroupBinding(objectID string) {
-	for _, secGroup := range sp.SecurityGroupBinding {
-		if secGroup.ObjectID == objectID {
-			return
-		}
-	}
-	// If we reached here the security group wasn't found, so add it.
-	sp.SecurityGroupBinding = append(sp.SecurityGroupBinding, SecurityGroup{ObjectID: objectID})
-	return
-}
-
-// RemoveSecurityGroupBinding - Removes the security group from the SecurityGroupBinding list if it exists.
-func (sp *SecurityPolicy) RemoveSecurityGroupBinding(objectID string) {
-	for idx, secGroup := range sp.SecurityGroupBinding {
-		if secGroup.ObjectID == objectID {
-			sp.SecurityGroupBinding = append(sp.SecurityGroupBinding[:idx], sp.SecurityGroupBinding[idx+1:]...)
-			return
-		}
-	}
-	return
-}
-
-// CheckFirewallRuleByUUID - Checks if a rule with the given UUID exists in the firewall rules of the security policy.
-func (sp *SecurityPolicy) CheckFirewallRuleByUUID(uuid string) bool {
-	for _, action := range sp.ActionsByCategory.Actions {
-		if action.VsmUUID == uuid {
-			return true
-		}
-	}
-	return false
-}
-
-// GetFirewallRuleByName - Returns the firewall rule with the given name, if it exists in the security policy.
-func (sp *SecurityPolicy) GetFirewallRuleByName(name string) *Action {
-	var actionFound Action
-	for _, action := range sp.ActionsByCategory.Actions {
-		if action.Name == name {
-			actionFound = action
-			break
-		}
-	}
-	return &actionFound
-}
-
-// GetFirewallRuleByUUID - Returns the firewall rule with the given UUID, if it exists in the security policy.
-func (sp *SecurityPolicy) GetFirewallRuleByUUID(uuid string) *Action {
-	var actionFound Action
-	for _, action := range sp.ActionsByCategory.Actions {
-		if action.VsmUUID == uuid {
-			actionFound = action
-			break
-		}
-	}
-	return &actionFound
-}
-
-// RemoveFirewallActionByName - Removes the firewall action from the security policy object if it exists.
-func (sp *SecurityPolicy) RemoveFirewallActionByName(actionName string) {
-	for idx, action := range sp.ActionsByCategory.Actions {
-		if action.Name == actionName {
-			sp.ActionsByCategory.Actions = append(sp.ActionsByCategory.Actions[:idx], sp.ActionsByCategory.Actions[idx+1:]...)
-			return
-		}
-	}
-}
-
-// RemoveFirewallActionByUUID - Removes the firewall action from the security policy object by its UUID, if it exists.
-func (sp *SecurityPolicy) RemoveFirewallActionByUUID(uuid string) {
-	for idx, action := range sp.ActionsByCategory.Actions {
-		if action.VsmUUID == uuid {
-			sp.ActionsByCategory.Actions = append(sp.ActionsByCategory.Actions[:idx], sp.ActionsByCategory.Actions[idx+1:]...)
-			return
-		}
-	}
-}
-
-// AddOutboundFirewallAction adds an outbound firewall action rule into the security policy.
-func (sp *SecurityPolicy) AddOutboundFirewallAction(name, action, direction string, secGroupObjectIDs, applicationObjectIDs []string) error {
-	if action != "allow" && action != "block" {
-		return errors.New("Action can be only 'allow' or 'block'")
-	}
-	if direction != "outbound" {
-		return errors.New("Direction can only be 'outbound'")
-	}
-
-	var secondarySecurityGroupList = []SecurityGroup{}
-	for _, secGroupID := range secGroupObjectIDs {
-		securityGroup := SecurityGroup{ObjectID: secGroupID}
-		secondarySecurityGroupList = append(secondarySecurityGroupList, securityGroup)
-	}
-
-	var secondaryApplicationsList = &Applications{}
-
-	if applicationObjectIDs[0] != "any" {
-		var secondaryApplicationList = []Application{}
-		for _, applicationObjectID := range applicationObjectIDs {
-			application := Application{ObjectID: applicationObjectID}
-			secondaryApplicationList = append(secondaryApplicationList, application)
-		}
-
-		secondaryApplicationsList.Applications = secondaryApplicationList
-	} else {
-		secondaryApplicationsList = nil
-	}
-
-	newAction := Action{
-		Class:                  "firewallSecurityAction",
-		Name:                   name,
-		Action:                 action,
-		Category:               "firewall",
-		Direction:              direction,
-		IsEnabled:              true,
-		SecondarySecurityGroup: secondarySecurityGroupList,
-		Applications:           secondaryApplicationsList,
-	}
-
-	if sp.ActionsByCategory.Category == "firewall" && len(sp.ActionsByCategory.Actions) >= 1 {
-		sp.ActionsByCategory.Actions = append(sp.ActionsByCategory.Actions, newAction)
-		return nil
-	}
-
-	// Build actionsByCategory list.
-	actionsByCategory := ActionsByCategory{Category: "firewall"}
-	actionsByCategory.Actions = []Action{newAction}
-	sp.ActionsByCategory = actionsByCategory
-	return nil
-}
-
-// AddInboundFirewallAction adds an inbound firewall action rule into the security policy.
-func (sp *SecurityPolicy) AddInboundFirewallAction(name, action, direction string, applicationObjectIDs []string) error { - if action != "allow" && action != "block" { - return errors.New("Action can be only 'allow' or 'block'") - } - if direction != "inbound" { - return errors.New("Direction can only be 'inbound'") - } - - var secondaryApplicationsList = &Applications{} - - if applicationObjectIDs[0] != "any" { - var secondaryApplicationList = []Application{} - for _, applicationObjectID := range applicationObjectIDs { - application := Application{ObjectID: applicationObjectID} - secondaryApplicationList = append(secondaryApplicationList, application) - } - - secondaryApplicationsList.Applications = secondaryApplicationList - } else { - secondaryApplicationsList = nil - } - - newAction := Action{ - Class: "firewallSecurityAction", - Name: name, - Action: action, - Category: "firewall", - Direction: direction, - IsEnabled: true, - Applications: secondaryApplicationsList, - } - - if sp.ActionsByCategory.Category == "firewall" && len(sp.ActionsByCategory.Actions) >= 1 { - sp.ActionsByCategory.Actions = append(sp.ActionsByCategory.Actions, newAction) - return nil - } - - // Build actionsByCategory list. - actionsByCategory := ActionsByCategory{Category: "firewall"} - actionsByCategory.Actions = []Action{newAction} - sp.ActionsByCategory = actionsByCategory - return nil -} - -func (spList SecurityPolicies) String() string { - return fmt.Sprint("SecurityPolicies object, contains security policy objects.") -} - -// FilterByName returns a single security policy object if it matches the name in SecurityPolicies list. -func (spList SecurityPolicies) FilterByName(name string) *SecurityPolicy { - var securityPolicyFound SecurityPolicy - for _, securityPolicy := range spList.SecurityPolicies { - if securityPolicy.Name == name { - securityPolicyFound = securityPolicy - break - } - } - return &securityPolicyFound -} - -// RemoveSecurityPolicyByName - Removes the SecurityPolicy from a list of SecurityPolicies provided it matches the given name. -func (spList SecurityPolicies) RemoveSecurityPolicyByName(policyName string) *SecurityPolicies { - for idx, securityPolicy := range spList.SecurityPolicies { - if securityPolicy.Name == policyName { - spList.SecurityPolicies = append(spList.SecurityPolicies[:idx], spList.SecurityPolicies[idx+1:]...) - break - } - } - return &spList -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/update_securitypolicy.go b/vendor/github.com/sky-uk/gonsx/api/securitypolicy/update_securitypolicy.go deleted file mode 100644 index 15f1fae..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitypolicy/update_securitypolicy.go +++ /dev/null @@ -1,24 +0,0 @@ -package securitypolicy - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateSecurityPolicyAPI ... 
-type UpdateSecurityPolicyAPI struct { - *api.BaseAPI -} - -// NewUpdate creates a new object of UpdateSecurityPolicyAPI -func NewUpdate(securityPolicyID string, securityPolicyPayload *SecurityPolicy) *UpdateSecurityPolicyAPI { - this := new(UpdateSecurityPolicyAPI) - endpointURL := "/api/2.0/services/policy/securitypolicy/" + securityPolicyID - this.BaseAPI = api.NewBaseAPI(http.MethodPut, endpointURL, securityPolicyPayload, new(string)) - return this -} - -// GetResponse returns the ResponseObject from UpdateSecurityPolicyAPI -func (updateAPI UpdateSecurityPolicyAPI) GetResponse() string { - return updateAPI.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/assign_securitytag.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/assign_securitytag.go deleted file mode 100644 index 86bf167..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/assign_securitytag.go +++ /dev/null @@ -1,18 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// AssignSecurityTagAPI struct -type AssignSecurityTagAPI struct { - *api.BaseAPI -} - -// NewAssign - Generates a new AssignSecurityTagAPI object. -func NewAssign(securityTagID, vmID string) *AssignSecurityTagAPI { - this := new(AssignSecurityTagAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/2.0/services/securitytags/tag/"+securityTagID+"/vm/"+vmID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/create_securitytag.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/create_securitytag.go deleted file mode 100644 index 3a9434f..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/create_securitytag.go +++ /dev/null @@ -1,28 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateSecurityTagAPI - struct -type CreateSecurityTagAPI struct { - *api.BaseAPI -} - -// NewCreate - Generates a new CreateSecurityTagAPI object. -func NewCreate(name, desc string) *CreateSecurityTagAPI { - this := new(CreateSecurityTagAPI) - requestPayload := new(SecurityTag) - requestPayload.Name = name - requestPayload.Description = desc - // TODO: need to make it argument - requestPayload.TypeName = "SecurityTag" - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/2.0/services/securitytags/tag", requestPayload, new(string)) - return this -} - -// GetResponse returns the ResponseObject from CreateSecurityTagAPI -func (createAPI CreateSecurityTagAPI) GetResponse() string { - return createAPI.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/delete_securitytag.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/delete_securitytag.go deleted file mode 100644 index 1620d72..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/delete_securitytag.go +++ /dev/null @@ -1,18 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteSecurityTagAPI - struct -type DeleteSecurityTagAPI struct { - *api.BaseAPI -} - -// NewDelete - Generates a new DeleteSecurityTagApi object. 
-func NewDelete(securityTagID string) *DeleteSecurityTagAPI { - this := new(DeleteSecurityTagAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.0/services/securitytags/tag/"+securityTagID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/detach_securitytag.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/detach_securitytag.go deleted file mode 100644 index eec0b04..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/detach_securitytag.go +++ /dev/null @@ -1,18 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DetachSecurityTagAPI - struct -type DetachSecurityTagAPI struct { - *api.BaseAPI -} - -// NewDetach - Generates a new DetachSecurityTagAPI object. -func NewDetach(securityTagID, vmID string) *DetachSecurityTagAPI { - this := new(DetachSecurityTagAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.0/services/securitytags/tag/"+securityTagID+"/vm/"+vmID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_attached_securitytags.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_attached_securitytags.go deleted file mode 100644 index a7ca701..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_attached_securitytags.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllAttachedSecurityTagsAPI - struct -type GetAllAttachedSecurityTagsAPI struct { - *api.BaseAPI -} - -// NewGetAllAttached - Generates a new GetAllSecurityTagsAttachedAPI object. -func NewGetAllAttached(tagID string) *GetAllAttachedSecurityTagsAPI { - this := new(GetAllAttachedSecurityTagsAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/securitytags/tag/"+tagID+"/vm", nil, new(BasicInfoList)) - return this -} - -// GetResponse returns the ResponseObject from GetAllAttachedSecurityTagsAPI -func (getAPI GetAllAttachedSecurityTagsAPI) GetResponse() *BasicInfoList { - return getAPI.ResponseObject().(*BasicInfoList) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags.go deleted file mode 100644 index 6cff5ff..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllSecurityTagsAPI - struct -type GetAllSecurityTagsAPI struct { - *api.BaseAPI -} - -// NewGetAll - Generates a new GetAllSecurityTagsAPI object. 
-func NewGetAll() *GetAllSecurityTagsAPI { - this := new(GetAllSecurityTagsAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/securitytags/tag", nil, new(SecurityTags)) - return this -} - -// GetResponse returns the ResponseObject from CreateSecurityTagAPI -func (getAPI GetAllSecurityTagsAPI) GetResponse() *SecurityTags { - return getAPI.ResponseObject().(*SecurityTags) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags_attached_to_vm.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags_attached_to_vm.go deleted file mode 100644 index b574dc6..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/get_all_securitytags_attached_to_vm.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllSecurityTagsAttachedToVMAPI - struct -type GetAllSecurityTagsAttachedToVMAPI struct { - *api.BaseAPI -} - -// NewGetAllAttachedToVM - Generates a new GetAllSecurityTagsAttachedToVMAPI object. -func NewGetAllAttachedToVM(vmID string) *GetAllSecurityTagsAttachedToVMAPI { - this := new(GetAllSecurityTagsAttachedToVMAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/securitytags/vm/"+vmID, nil, new(SecurityTags)) - return this -} - -// GetResponse - returns the ResponseObject from GetAllSecurityTagsAttachedToVMAPI -func (getAPI GetAllSecurityTagsAttachedToVMAPI) GetResponse() *SecurityTags { - return getAPI.ResponseObject().(*SecurityTags) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types.go deleted file mode 100644 index 7009bba..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types.go +++ /dev/null @@ -1,41 +0,0 @@ -package securitytag - -import "encoding/xml" - -// SecurityTags top level struct -type SecurityTags struct { - SecurityTags []SecurityTag `xml:"securityTag"` -} - -// SecurityTag object struct -type SecurityTag struct { - XMLName xml.Name `xml:"securityTag"` - ObjectID string `xml:"objectId,omitempty"` - Name string `xml:"name"` - Description string `xml:"description"` - TypeName string `xml:"type>typeName"` - Revision int `xml:"revision,omitempty"` -} - -// AttachmentList struct -type AttachmentList struct { - XMLName xml.Name `xml:"securityTags"` - SecurityTagAttachments []Attachment `xml:"securityTag"` -} - -// Attachment object struct -type Attachment struct { - //XMLName xml.Name `xml:"securityTag"` - ObjectID string `xml:"objectId"` -} - -// BasicInfoList struct to get info of vms attached to tags -type BasicInfoList struct { - BasicInfoList []BasicInfo `xml:"basicinfo"` -} - -// BasicInfo gives info of list -type BasicInfo struct { - ObjectID string `xml:"objectId"` - Name string `xml:"name"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types_functions.go deleted file mode 100644 index 236ae8f..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/securitytags_types_functions.go +++ /dev/null @@ -1,72 +0,0 @@ -package securitytag - -import "fmt" - -func (s SecurityTags) String() string { - var returnString = "Security tags contains a list of securitytags" - return returnString -} - -func (s SecurityTag) String() string { - return fmt.Sprintf("Security tag name %s and id %s", s.Name, s.ObjectID) -} - -// FilterByName - Filters the SecurityTags->SecurityTag with provided name 
-func (s SecurityTags) FilterByName(name string) *SecurityTag { - var securityTagFound SecurityTag - for _, securityTag := range s.SecurityTags { - if securityTag.Name == name { - securityTagFound = securityTag - break - } - } - return &securityTagFound -} - -// CheckByName - Returns true or false depending if name is in securityTags -func (s SecurityTags) CheckByName(name string) bool { - for _, securityTag := range s.SecurityTags { - if securityTag.Name == name { - return true - } - } - return false -} - -// FilterByIDAttached - Filters BasicInfoList->BasicInfo with provided name -func (b BasicInfoList) FilterByIDAttached(id string) *BasicInfo { - var basicInfoFound BasicInfo - for _, basicInfo := range b.BasicInfoList { - if basicInfo.ObjectID == id { - basicInfoFound = basicInfo - break - } - } - return &basicInfoFound -} - -// AddSecurityTagToAttachmentList - Adds a SecurityTagAttachment to a SecurityTagAttachmentList -func (s *AttachmentList) AddSecurityTagToAttachmentList(st Attachment) { - s.SecurityTagAttachments = append(s.SecurityTagAttachments, st) -} - -// CheckByObjectID - Checks if a specific securityTagAttachment is within a SecurityTagAttachmentList -func (s *AttachmentList) CheckByObjectID(objectID string) bool { - for _, securityTagAttachment := range s.SecurityTagAttachments { - if securityTagAttachment.ObjectID == objectID { - return true - } - } - return false -} - -// VerifyAttachments - Returns a list of tags that need to be detached from the VM -func (s *AttachmentList) VerifyAttachments(originalTags *SecurityTags) []string { - var tagsToRemove []string - for _, securityTag := range originalTags.SecurityTags { - if s.CheckByObjectID(securityTag.ObjectID) == false { - tagsToRemove = append(tagsToRemove, securityTag.ObjectID) - } - } - return tagsToRemove -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/update_attached_securitytags.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/update_attached_securitytags.go deleted file mode 100644 index 01d314b..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/update_attached_securitytags.go +++ /dev/null @@ -1,24 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateAttachedSecurityTagsAPI - struct -type UpdateAttachedSecurityTagsAPI struct { - *api.BaseAPI -} - -// NewUpdateAttachedTags - Generates a NewUpdateAttachedSecurityTagsAPI object. -func NewUpdateAttachedTags(vmID string, securityTagPayload *AttachmentList) *UpdateAttachedSecurityTagsAPI { - this := new(UpdateAttachedSecurityTagsAPI) - endpointURL := "/api/2.0/services/securitytags/vm/" + vmID + "?action=ASSIGN_TAGS" - this.BaseAPI = api.NewBaseAPI(http.MethodPost, endpointURL, securityTagPayload, nil) - return this -} - -// GetResponse returns the ResponseObject from CreateSecurityTagAPI -func (updateAPI UpdateAttachedSecurityTagsAPI) GetResponse() string { - return updateAPI.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/securitytag/update_securitytag.go b/vendor/github.com/sky-uk/gonsx/api/securitytag/update_securitytag.go deleted file mode 100644 index f4e7e07..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/securitytag/update_securitytag.go +++ /dev/null @@ -1,23 +0,0 @@ -package securitytag - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -//UpdateSecurityTagAPI - struct -type UpdateSecurityTagAPI struct { - *api.BaseAPI -} - -//NewUpdate - Generates a new UpdateSecurityTagAPI object. 
-func NewUpdate(securityTagID string, securityTagPayload *SecurityTag) *UpdateSecurityTagAPI { - this := new(UpdateSecurityTagAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/2.0/services/securitytags/tag/"+securityTagID, securityTagPayload, new(string)) - return this -} - -// GetResponse returns the ResponseObject from UpdateSecurityTagAPI -func (updateAPI UpdateSecurityTagAPI) GetResponse() string { - return updateAPI.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/create_service.go b/vendor/github.com/sky-uk/gonsx/api/service/create_service.go deleted file mode 100644 index baa43f2..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/create_service.go +++ /dev/null @@ -1,30 +0,0 @@ -package service - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateServiceAPI api object -type CreateServiceAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreateServiceAPI. -func NewCreate(scopeID, name, desc, proto, ports string) *CreateServiceAPI { - this := new(CreateServiceAPI) - requestPayload := new(ApplicationService) - requestPayload.Name = name - requestPayload.Description = desc - - element := Element{ApplicationProtocol: proto, Value: ports} - requestPayload.Element = []Element{element} - - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/2.0/services/application/"+scopeID, requestPayload, new(string)) - return this -} - -// GetResponse returns a ResponseObject of CreateServiceAPI. -func (ca CreateServiceAPI) GetResponse() string { - return ca.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/delete_service.go b/vendor/github.com/sky-uk/gonsx/api/service/delete_service.go deleted file mode 100644 index c4bdd85..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/delete_service.go +++ /dev/null @@ -1,18 +0,0 @@ -package service - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteServiceAPI base object. -type DeleteServiceAPI struct { - *api.BaseAPI -} - -// NewDelete returns a new object of DeleteServiceAPI. -func NewDelete(serviceID string) *DeleteServiceAPI { - this := new(DeleteServiceAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.0/services/application/"+serviceID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/get_all_service.go b/vendor/github.com/sky-uk/gonsx/api/service/get_all_service.go deleted file mode 100644 index bc2f393..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/get_all_service.go +++ /dev/null @@ -1,23 +0,0 @@ -package service - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllServiceAPI base object. -type GetAllServiceAPI struct { - *api.BaseAPI -} - -// NewGetAll returns a new object of GetAllServiceAPI. -func NewGetAll(scopeID string) *GetAllServiceAPI { - this := new(GetAllServiceAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/services/application/scope/"+scopeID, nil, new(ApplicationsList)) - return this -} - -// GetResponse returns ResponseObject of GetAllServiceAPI. 
-func (ga GetAllServiceAPI) GetResponse() *ApplicationsList { - return ga.ResponseObject().(*ApplicationsList) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/service_types.go b/vendor/github.com/sky-uk/gonsx/api/service/service_types.go deleted file mode 100644 index be89b43..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/service_types.go +++ /dev/null @@ -1,27 +0,0 @@ -package service - -import "encoding/xml" - -// ApplicationsList top level element. -type ApplicationsList struct { - XMLName xml.Name `xml:"list"` - Applications []ApplicationService `xml:"application"` -} - -// ApplicationService - object within ApplicationsList. -type ApplicationService struct { - XMLName xml.Name `xml:"application"` - Name string `xml:"name"` - ObjectID string `xml:"objectId,omitempty"` - Type string `xml:"type,omitempty>TypeName,omitempty"` - Revision int `xml:"revision,omitempty"` - Description string `xml:"description"` - Element []Element `xml:"element"` -} - -// Element - object within ApplicationService -type Element struct { - XMLName xml.Name `xml:"element"` - ApplicationProtocol string `xml:"applicationProtocol"` - Value string `xml:"value"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/service_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/service/service_types_functions.go deleted file mode 100644 index ed55071..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/service_types_functions.go +++ /dev/null @@ -1,35 +0,0 @@ -package service - -import ( - "fmt" -) - -func (a ApplicationsList) String() string { - return fmt.Sprintf("ApplicationsList object, contains service objects.") -} - -func (a ApplicationService) String() string { - return fmt.Sprintf("objectId: %-20s name: %-20s.", a.ObjectID, a.Name) -} - -// FilterByName returns a single service object if it matches the name in ApplicationsList -func (a ApplicationsList) FilterByName(name string) *ApplicationService { - var serviceFound ApplicationService - for _, service := range a.Applications { - if service.Name == name { - serviceFound = service - break - } - } - return &serviceFound -} - -// CheckByName - Returns true or false depending if name is in ApplicationList -func (a ApplicationsList) CheckByName(name string) bool { - for _, applicationService := range a.Applications { - if applicationService.Name == name { - return true - } - } - return false -} diff --git a/vendor/github.com/sky-uk/gonsx/api/service/update_service.go b/vendor/github.com/sky-uk/gonsx/api/service/update_service.go deleted file mode 100644 index f3e363c..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/service/update_service.go +++ /dev/null @@ -1,24 +0,0 @@ -package service - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateServiceAPI ... 
-type UpdateServiceAPI struct { - *api.BaseAPI -} - -// NewUpdate creates a new object of UpdateServiceAPI -func NewUpdate(serviceID string, payload *ApplicationService) *UpdateServiceAPI { - this := new(UpdateServiceAPI) - endpointURL := "/api/2.0/services/application/" + serviceID - this.BaseAPI = api.NewBaseAPI(http.MethodPut, endpointURL, payload, new(ApplicationService)) - return this -} - -// GetResponse returns the ResponseObject from UpdateServiceAPI -func (updateAPI UpdateServiceAPI) GetResponse() *ApplicationService { - return updateAPI.ResponseObject().(*ApplicationService) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/tzone/get_all_tzone.go b/vendor/github.com/sky-uk/gonsx/api/tzone/get_all_tzone.go deleted file mode 100644 index d60e16b..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/tzone/get_all_tzone.go +++ /dev/null @@ -1,23 +0,0 @@ -package tzone - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllTransportZonesAPI base api object. -type GetAllTransportZonesAPI struct { - *api.BaseAPI -} - -// NewGetAll returns new object of GetAllTransportZonesAPI -func NewGetAll() *GetAllTransportZonesAPI { - this := new(GetAllTransportZonesAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/vdn/scopes", nil, new(NetworkScopeList)) - return this -} - -// GetResponse returns ResponseObject of GetAllTransportZonesAPI -func (ga GetAllTransportZonesAPI) GetResponse() *NetworkScopeList { - return ga.ResponseObject().(*NetworkScopeList) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/tzone/get_tzone.go b/vendor/github.com/sky-uk/gonsx/api/tzone/get_tzone.go deleted file mode 100644 index fce20cb..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/tzone/get_tzone.go +++ /dev/null @@ -1,23 +0,0 @@ -package tzone - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetTransportZoneAPI base api object. -type GetTransportZoneAPI struct { - *api.BaseAPI -} - -// NewGet returns new object of GetTransportZoneAPI -func NewGet(id string) *GetTransportZoneAPI { - this := new(GetTransportZoneAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/vdn/scopes/"+id, nil, new(NetworkScope)) - return this -} - -// GetResponse returns ResponseObject of GetTransportZoneAPI -func (ga GetTransportZoneAPI) GetResponse() *NetworkScope { - return ga.ResponseObject().(*NetworkScope) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types.go b/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types.go deleted file mode 100644 index 86bdf5e..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types.go +++ /dev/null @@ -1,12 +0,0 @@ -package tzone - -// NetworkScopeList top level xml element -type NetworkScopeList struct { - NetworkScopeList []NetworkScope `xml:"vdnScope"` -} - -// NetworkScope object within NetworkScopeList. -type NetworkScope struct { - ObjectID string `xml:"objectId"` - Name string `xml:"name"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types_functions.go deleted file mode 100644 index dd039b0..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/tzone/tzone_types_functions.go +++ /dev/null @@ -1,23 +0,0 @@ -package tzone - -import "fmt" - -func (s NetworkScopeList) String() string { - return fmt.Sprintf("%s", s.NetworkScopeList) -} - -func (s NetworkScope) String() string { - return fmt.Sprintf("id: %s, name: %s", s.ObjectID, s.Name) -} - -// FilterByName filters NetworkScopeList object given a name and returns the found object. 
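The tzone endpoints above follow the same BaseAPI flow as every gonsx call: construct the API object, run it through the client, then read the typed response. A hedged usage sketch (URL and credentials are placeholders; error handling trimmed to the essentials):

package main

import (
	"fmt"
	"log"

	"github.com/sky-uk/gonsx"
	"github.com/sky-uk/gonsx/api/tzone"
)

func main() {
	// Placeholder endpoint and credentials; the last two arguments are
	// ignoreSSL and debug, as in NewNSXClient.
	client := gonsx.NewNSXClient("https://nsx.example.com", "admin", "secret", true, false)

	getAll := tzone.NewGetAll()
	if err := client.Do(getAll); err != nil {
		log.Fatal(err)
	}
	// GetResponse yields the decoded NetworkScopeList; FilterByName then
	// picks a single scope out of it by display name.
	scope := getAll.GetResponse().FilterByName("tz-datacenter")
	fmt.Println(scope.ObjectID)
}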
-func (s NetworkScopeList) FilterByName(name string) *NetworkScope { - var networkScopeFound NetworkScope - for _, networkScope := range s.NetworkScopeList { - if networkScope.Name == name { - networkScopeFound = networkScope - break - } - } - return &networkScopeFound -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/create_virtualwire.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/create_virtualwire.go deleted file mode 100644 index 9938b12..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/create_virtualwire.go +++ /dev/null @@ -1,23 +0,0 @@ -package virtualwire - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// CreateVirtualWireAPI base api object. -type CreateVirtualWireAPI struct { - *api.BaseAPI -} - -// NewCreate returns a new object of CreateVirtualWireAPI. -func NewCreate(virtualWireSpec CreateSpec, scopeID string) *CreateVirtualWireAPI { - this := new(CreateVirtualWireAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPost, "/api/2.0/vdn/scopes/"+scopeID+"/virtualwires", virtualWireSpec, new(string)) - return this -} - -// GetResponse returns ResponseObject of CreateVirtualWireAPI. -func (ca CreateVirtualWireAPI) GetResponse() string { - return ca.ResponseObject().(string) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/delete_virtualwire.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/delete_virtualwire.go deleted file mode 100644 index bf7f018..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/delete_virtualwire.go +++ /dev/null @@ -1,18 +0,0 @@ -package virtualwire - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// DeleteVirtualWiresAPI base object. -type DeleteVirtualWiresAPI struct { - *api.BaseAPI -} - -// NewDelete returns a new object of DeleteVirtualWiresAPI. -func NewDelete(virtualWireID string) *DeleteVirtualWiresAPI { - this := new(DeleteVirtualWiresAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodDelete, "/api/2.0/vdn/virtualwires/"+virtualWireID, nil, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_all_virtualwire.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_all_virtualwire.go deleted file mode 100644 index 1b5d110..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_all_virtualwire.go +++ /dev/null @@ -1,23 +0,0 @@ -package virtualwire - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetAllVirtualWiresAPI base object. -type GetAllVirtualWiresAPI struct { - *api.BaseAPI -} - -// NewGetAll returns a new object of GetAllVirtualWiresAPI. -func NewGetAll(scopeID string) *GetAllVirtualWiresAPI { - this := new(GetAllVirtualWiresAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/vdn/scopes/"+scopeID+"/virtualwires", nil, new(VirtualWires)) - return this -} - -// GetResponse returns ResponseObject of GetAllVirtualWiresAPI. -func (ga GetAllVirtualWiresAPI) GetResponse() *VirtualWires { - return ga.ResponseObject().(*VirtualWires) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_virtualwire.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_virtualwire.go deleted file mode 100644 index e4f1e36..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/get_virtualwire.go +++ /dev/null @@ -1,23 +0,0 @@ -package virtualwire - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// GetVirtualWireAPI base object. -type GetVirtualWireAPI struct { - *api.BaseAPI -} - -// NewGet returns new object of GetVirtualWireAPI. 
-func NewGet(id string) *GetVirtualWireAPI { - this := new(GetVirtualWireAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodGet, "/api/2.0/vdn/virtualwires/"+id, nil, new(VirtualWire)) - return this -} - -// GetResponse returns ResponseObject of GetVirtualWireAPI. -func (ga GetVirtualWireAPI) GetResponse() *VirtualWire { - return ga.ResponseObject().(*VirtualWire) -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/update_virtualwire.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/update_virtualwire.go deleted file mode 100644 index 3e87b30..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/update_virtualwire.go +++ /dev/null @@ -1,18 +0,0 @@ -package virtualwire - -import ( - "github.com/sky-uk/gonsx/api" - "net/http" -) - -// UpdateVirtualWireAPI base api object. -type UpdateVirtualWireAPI struct { - *api.BaseAPI -} - -// NewUpdate returns a new object of UpdateVirtualWireAPI. Returns response code 200 with no content. -func NewUpdate(virtualWire VirtualWire) *UpdateVirtualWireAPI { - this := new(UpdateVirtualWireAPI) - this.BaseAPI = api.NewBaseAPI(http.MethodPut, "/api/2.0/vdn/virtualwires/"+virtualWire.ObjectID, virtualWire, nil) - return this -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types.go deleted file mode 100644 index 0391d35..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types.go +++ /dev/null @@ -1,44 +0,0 @@ -package virtualwire - -import "encoding/xml" - -// VirtualWires - top level xml element -type VirtualWires struct { - DataPage DataPage `xml:"dataPage"` -} - -// DataPage within VirtualWires -type DataPage struct { - VirtualWires []VirtualWire `xml:"virtualWire"` -} - -// VirtualWire is a single virtual wire object within virtualWire list. -type VirtualWire struct { - XMLName xml.Name `xml:"virtualWire"` - Name string `xml:"name"` - ObjectID string `xml:"objectId,omitempty"` - ControlPlaneMode string `xml:"controlPlaneMode"` - Description string `xml:"description"` - TenantID string `xml:"tenantId,omitempty"` - VdnID string `xml:"vdnId,omitempty"` - VdsContext []VdsContext `xml:"vdsContextWithBacking,omitempty"` -} - -// VdsContext represents a port group for which a VirtualWire is provisioned; a VirtualWire can have several VdsContexts -type VdsContext struct { - Switch Switch `xml:"switch"` -} - -// Switch is the logical switch into which a VirtualWire is "plugged" -type Switch struct { - ObjectID string `xml:"objectId"` -} - -// CreateSpec is used in create call on VirtualWire api. 
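CreateSpec (defined just below) is the request body for the NewCreate call above; the create call returns the new wire's ID as a plain string. An illustrative sketch, with invented names and an assumed control-plane mode:

package example

import (
	"github.com/sky-uk/gonsx"
	"github.com/sky-uk/gonsx/api/virtualwire"
)

// createWire builds a CreateSpec and runs it through the shared client.
func createWire(client *gonsx.NSXClient, scopeID string) (string, error) {
	spec := virtualwire.CreateSpec{
		Name:             "web-tier",     // invented example name
		ControlPlaneMode: "UNICAST_MODE", // assumed value; depends on the NSX setup
		Description:      "created via gonsx",
		TenantID:         "tenant-1",
	}
	create := virtualwire.NewCreate(spec, scopeID)
	if err := client.Do(create); err != nil {
		return "", err
	}
	// NSX returns the new virtualwire ID in the response body.
	return create.GetResponse(), nil
}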
-type CreateSpec struct { - XMLName xml.Name `xml:"virtualWireCreateSpec"` - Name string `xml:"name"` - ControlPlaneMode string `xml:"controlPlaneMode"` - Description string `xml:"description"` - TenantID string `xml:"tenantId,omitempty"` -} diff --git a/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types_functions.go b/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types_functions.go deleted file mode 100644 index d97bbeb..0000000 --- a/vendor/github.com/sky-uk/gonsx/api/virtualwire/virtualwire_types_functions.go +++ /dev/null @@ -1,23 +0,0 @@ -package virtualwire - -import "fmt" - -func (s VirtualWires) String() string { - return fmt.Sprintf("%s", s.DataPage) -} - -func (s VirtualWire) String() string { - return fmt.Sprintf("id: %s, name: %s", s.ObjectID, s.Name) -} - -// FilterByName returns a single virtualWire object if it matches the name from VirtualWires -func (s VirtualWires) FilterByName(name string) *VirtualWire { - var virtualWireFound VirtualWire - for _, virtualWire := range s.DataPage.VirtualWires { - if virtualWire.Name == name { - virtualWireFound = virtualWire - break - } - } - return &virtualWireFound -} diff --git a/vendor/github.com/sky-uk/gonsx/client.go b/vendor/github.com/sky-uk/gonsx/client.go deleted file mode 100644 index 10518aa..0000000 --- a/vendor/github.com/sky-uk/gonsx/client.go +++ /dev/null @@ -1,110 +0,0 @@ -package gonsx - -import ( - "bytes" - "crypto/tls" - "encoding/xml" - "fmt" - "github.com/sky-uk/gonsx/api" - "io" - "io/ioutil" - "log" - "net/http" - "strings" -) - -// NewNSXClient Creates a new nsxclient object. -func NewNSXClient(url string, user string, password string, ignoreSSL bool, debug bool) *NSXClient { - nsxClient := new(NSXClient) - nsxClient.URL = url - nsxClient.User = user - nsxClient.Password = password - nsxClient.IgnoreSSL = ignoreSSL - nsxClient.debug = debug - return nsxClient -} - -// NSXClient struct. -type NSXClient struct { - URL string - User string - Password string - IgnoreSSL bool - debug bool -} - -// Do - makes the API call. -func (nsxClient *NSXClient) Do(api api.NSXApi) error { - requestURL := fmt.Sprintf("%s%s", nsxClient.URL, api.Endpoint()) - - var requestPayload io.Reader - if api.RequestObject() != nil { - requestXMLBytes, marshallingErr := xml.Marshal(api.RequestObject()) - if marshallingErr != nil { - log.Fatal(marshallingErr) - } - if nsxClient.debug { - log.Println(string(requestXMLBytes)) - } - requestPayload = bytes.NewReader(requestXMLBytes) - } - if nsxClient.debug { - log.Println("requestURL:", requestURL) - } - req, err := http.NewRequest(api.Method(), requestURL, requestPayload) - if err != nil { - log.Println("ERROR building the request: ", err) - return err - } - - req.SetBasicAuth(nsxClient.User, nsxClient.Password) - // TODO: remove this hardcoded value! 
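// A hedged sketch of how the TODO above might have been retired (hypothetical,
// not part of the vendored client): carry the content type on NSXClient rather
// than hardcoding it at this call site, e.g.
//
//	type NSXClient struct {
//		URL, User, Password string
//		IgnoreSSL, debug    bool
//		ContentType         string // defaulted to "application/xml" in NewNSXClient
//	}
//	req.Header.Set("Content-Type", nsxClient.ContentType)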
- req.Header.Set("Content-Type", "application/xml") - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: nsxClient.IgnoreSSL}, - } - httpClient := &http.Client{Transport: tr} - res, err := httpClient.Do(req) - if err != nil { - log.Println("ERROR executing request: ", err) - return err - } - defer res.Body.Close() - return nsxClient.handleResponse(api, res) -} - -func (nsxClient *NSXClient) handleResponse(api api.NSXApi, res *http.Response) error { - api.SetStatusCode(res.StatusCode) - bodyText, err := ioutil.ReadAll(res.Body) - if err != nil { - log.Println("ERROR reading response: ", err) - return err - } - - api.SetRawResponse(bodyText) - - if nsxClient.debug { - log.Println("STATUS CODE: ", api.StatusCode()) - } - if isXML(res.Header.Get("Content-Type")) && api.StatusCode() == 200 { - xmlerr := xml.Unmarshal(bodyText, api.ResponseObject()) - if xmlerr != nil { - log.Println("ERROR unmarshalling response: ", xmlerr) - return xmlerr - } - if nsxClient.debug { - log.Printf("DECODED RESPONSE:\n%+v\n", api.ResponseObject()) - } - } else { - api.SetResponseObject(string(bodyText)) - if nsxClient.debug { - log.Println(string(bodyText)) - } - } - return nil -} - -func isXML(contentType string) bool { - return strings.Contains(strings.ToLower(contentType), "/xml") -} diff --git a/vendor/github.com/sky-uk/gonsx/docker-compose.yml b/vendor/github.com/sky-uk/gonsx/docker-compose.yml deleted file mode 100644 index 063e67a..0000000 --- a/vendor/github.com/sky-uk/gonsx/docker-compose.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: '2' -services: - gonsx: - build: . - volumes: - - ./:/go/src/github.com/sky-uk/gonsx - - diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt deleted file mode 100644 index 473b670..0000000 --- a/vendor/github.com/stretchr/testify/LICENCE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful.
- -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index e6a7960..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,387 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND -*/ - -package assert - -import ( - - http "net/http" - url "net/url" - time "time" -) - - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) -} - - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) -} - - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) -} - - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - return Error(a.t, err, msgAndArgs...) -} - - -// Exactly asserts that two objects are equal is value and type. -// -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) -} - - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - return FailNow(a.t, failureMessage, msgAndArgs...) -} - - -// False asserts that the specified value is false. -// -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) -} - - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) -} - - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) -} - - -// HTTPRedirect asserts that a specified handler returns a redirect status code. 
-// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) -} - - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) -} - - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - - -// Nil asserts that the specified object is nil. 
-// -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - return NoError(a.t, err, msgAndArgs...) -} - - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - return NotZero(a.t, i, msgAndArgs...) -} - - -// Panics asserts that the code inside the specified PanicTestFunc panics. 
-// -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) -} - - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - - -// True asserts that the specified value is true. -// -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - return Zero(a.t, i, msgAndArgs...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 99f9acf..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 348d5f1..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1007 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "math" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - - if expected == nil || actual == nil { - return expected == actual - } - - return reflect.DeepEqual(expected, actual) - -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. 
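ObjectsAreEqual and ObjectsAreEqualValues (continued just below) are what separate assert.Equal from assert.EqualValues: DeepEqual treats the same number in different integer types as unequal, while the Values variant converts to a common type first. A small illustrative test (hypothetical, for exposition only):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualVersusEqualValues(t *testing.T) {
	// reflect.DeepEqual distinguishes int32 from int64, so Equal would fail here...
	assert.NotEqual(t, int32(123), int64(123))
	// ...while EqualValues converts to a common type before comparing.
	assert.EqualValues(t, int32(123), int64(123))
}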
-func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - return nil - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. -func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. 
- if ii == 0 && i > 0 { - ii++ - } - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - - errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") - if len(message) > 0 { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2)) - } - - return false -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true - -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). 
-func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -var numericZeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - if object == nil { - return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range numericZeros { - if object == v { - return true - } - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Struct: - switch object.(type) { - case time.Time: - return object.(time.Time).IsZero() - } - case reflect.Ptr: - { - if objValue.IsNil() { - return true - } - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } - } - } - return false -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. 
-func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). 
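includeElement above is what gives Contains its three shapes: substring search for strings, key lookup for maps, and a linear scan for slices and arrays. A compact illustration (hypothetical test):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestContainsShapes(t *testing.T) {
	assert.Contains(t, "Hello World", "World")                       // substring
	assert.Contains(t, []string{"Hello", "World"}, "World")          // slice element
	assert.Contains(t, map[string]string{"Hello": "World"}, "Hello") // map key, not value
}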
-func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). 
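didPanic above is the standard recover-in-a-closure idiom: the deferred recover runs inside an inner function so the surrounding assertion survives the panic and can report its value. The same shape works standalone (illustrative sketch):

package example

// safely reports whether f panicked and with what value, using the same
// deferred-recover closure shape as testify's didPanic.
func safely(f func()) (panicked bool, value interface{}) {
	func() {
		defer func() {
			if value = recover(); value != nil {
				panicked = true
			}
		}()
		f() // call the function under observation
	}()
	return
}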
-func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) - if !result { - return result - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) 
- } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if err == nil { - return Fail(t, "An error is expected but got nil. %s", message) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { - return false - } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, errString, theError.Error(), - s, errString, theError.Error(), message) -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) 
- } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - spew.Config.SortKeys = true - e := spew.Sdump(expected) - a := spew.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
-// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index b867e95..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index e1b9442..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,106 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return -1 - } - handler(w, req) - return w.Code -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). 
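For orientation while reading the HTTP helpers below, here is a minimal sketch of how they are typically driven from a test; the handler, test name, and path are illustrative, not part of the vendored file:

    import (
    	"fmt"
    	"net/http"
    	"testing"

    	"github.com/stretchr/testify/assert"
    )

    func TestHealthHandler(t *testing.T) {
    	// An inline handler is enough; the helpers build the request themselves.
    	handler := func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprint(w, "ok")
    	}
    	// values may be nil; they are encoded onto the query string when present.
    	assert.HTTPSuccess(t, handler, "GET", "/health", nil)
    	assert.HTTPBodyContains(t, handler, "GET", "/health", nil, "ok")
    }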
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusOK && code <= http.StatusPartialContent -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. - // - // assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}) - // - // Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect -} - -// HTTPError asserts that a specified handler returns an error status code. - // - // assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}) - // - // Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusBadRequest -} - -// HTTPBody is a helper that returns the HTTP body of the response. It returns -// an empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. - // - // assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") - // - // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. - // - // assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") - // - // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc31160..0000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index 202fa8a..0000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
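As context for the API being deleted here, a hedged sketch of the package's usual hash-then-verify flow; the password literal and the function name are illustrative only:

    import (
    	"log"

    	"golang.org/x/crypto/bcrypt"
    )

    func hashAndVerify() {
    	// Hash at registration time (a cost below MinCost silently becomes DefaultCost).
    	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Verify at login time: nil means the password matches,
    	// bcrypt.ErrMismatchedHashAndPassword means it does not.
    	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
    		log.Fatal(err)
    	}
    }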
-var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). 
- p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. - ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n += 1 - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n += 1 - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f19..0000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. 
-func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 2641dad..0000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. 
-func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates and returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the cipher's key -// and stores the result in dst. -// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the cipher's key -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d040775..0000000 --- a/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt.
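The Encrypt doc above warns that calling Encrypt block-by-block is unsafe for longer messages; a minimal sketch of the intended pattern, wrapping the cipher in CBC via crypto/cipher (the helper name is illustrative, and key handling plus padding to an 8-byte multiple remain the caller's responsibility):

    import (
    	"crypto/cipher"
    	"crypto/rand"
    	"log"

    	"golang.org/x/crypto/blowfish"
    )

    func encryptCBC(key, plaintext []byte) []byte {
    	block, err := blowfish.NewCipher(key) // key: 1 to 56 bytes
    	if err != nil {
    		log.Fatal(err)
    	}
    	// A fresh random IV per message; len(plaintext) must already be
    	// a multiple of blowfish.BlockSize (8 bytes).
    	iv := make([]byte, blowfish.BlockSize)
    	if _, err := rand.Read(iv); err != nil {
    		log.Fatal(err)
    	}
    	ciphertext := make([]byte, len(plaintext))
    	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
    	return append(iv, ciphertext...) // prepend the IV so the receiver can decrypt
    }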
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index 0b4af37..0000000 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common -// OpenPGP cipher. 
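For reference, a hedged sketch of the block-level API this file provides, exercised on a single block (the zero key and the round-trip helper are illustrative; real callers wrap the cipher in a mode from crypto/cipher, as in the blowfish sketch above):

    import (
    	"log"

    	"golang.org/x/crypto/cast5"
    )

    func roundTrip() {
    	key := make([]byte, cast5.KeySize) // must be exactly 16 bytes
    	c, err := cast5.NewCipher(key)
    	if err != nil {
    		log.Fatal(err)
    	}
    	var src, dst, back [cast5.BlockSize]byte
    	c.Encrypt(dst[:], src[:])
    	c.Decrypt(back[:], dst[:]) // back now equals src
    }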
-package cast5 // import "golang.org/x/crypto/cast5" - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h deleted file mode 100644 index b3f7416..0000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.h +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s deleted file mode 100644 index ee7b4bd..0000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. - -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s deleted file mode 100644 index cd793a5..0000000 --- a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
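// The cswap routine below performs a constant-time conditional swap: it
// expands v (0 or 1) into an all-zeros or all-ones mask (the SUBQ $1 / NOTQ
// pair), then XOR-ANDs the mask through both operands so no branch or memory
// access pattern depends on the secret bit. A hedged Go sketch of the same
// technique on the identical [4][5]uint64 layout (the first two limb vectors
// swap with the last two; the function name is illustrative):
func cswapSketch(inout *[4][5]uint64, v uint64) {
	mask := ^(v - 1) // all ones when v == 1, all zeros when v == 0
	for i := 0; i < 2; i++ {
		for j := 0; j < 5; j++ {
			t := mask & (inout[i][j] ^ inout[i+2][j])
			inout[i][j] ^= t
			inout[i+2][j] ^= t
		}
	}
}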
- -// +build amd64,!gccgo,!appengine - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 2d14c2a..0000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// We have a implementation in amd64 assembly so this code is only run on -// non-amd64 platforms. The amd64 assembly does not support gccgo. -// +build !amd64 gccgo appengine - -package curve25519 - -import ( - "encoding/binary" -) - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. 
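// A worked note on the fieldElement representation above: limb t[i] carries
// weight 2^ceil(25.5*i), i.e. 2^0, 2^26, 2^51, 2^77, 2^102, 2^128, 2^153,
// 2^179, 2^204, 2^230, so even limbs hold 26 bits and odd limbs 25.
// feFromBytes (below) shifts each little-endian load into position: for
// example, h1 = load3(src[4:]) << 6 takes input bits 32..55 and, counted at
// weight 2^26, places them at 2^(6+26) = 2^32, exactly where they sit in the
// 255-bit integer.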
-func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := load3(src[29:]) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<1. -// -// Write x=r+19(2^(-255)r+2^(-1). -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func feToBytes(s *[32]byte, h *fieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9].
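// (A note on the packing below: the ten reduced limbs, alternately 26 and 25
// bits wide, are serialized little-endian into the 32-byte output; byte
// boundaries straddle limbs, e.g. s[3] = byte((h[0] >> 24) | (h[1] << 2))
// joins the top two bits of the 26-bit h[0] with the low six bits of h[1].)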
- - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
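// (A worked instance of the notes above: f1 has weight 2^26 and g9 has
// weight 2^230, so their product sits at weight 2^256; since 2^255 = 19
// (mod 2^255-19), 2^256 = 38, and the term enters h0 as
// f1g9_38 = (2*f1) * (19*g9). The extra doubling appears exactly when both
// limb indices are odd, because ceil(25.5*i) + ceil(25.5*j) then exceeds
// ceil(25.5*(i+j)) by one.)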
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
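// (The fixed chain below computes z^(p-2) for p = 2^255-19; by Fermat's
// little theorem z^(p-2) = z^-1 (mod p). The loop bounds 1, 2, 5, 10, 20,
// 50 and 100 are the segment lengths of an addition chain for the exponent
// p-2 = 2^255 - 21.)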
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go deleted file mode 100644 index da9b10d..0000000 --- a/vendor/golang.org/x/crypto/curve25519/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of scalar multiplication on -// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html -package curve25519 // import "golang.org/x/crypto/curve25519" - -// basePoint is the x coordinate of the generator of the curve. -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -// ScalarMult sets dst to the product in*base where dst and base are the x -// coordinates of group points and all values are in little-endian form. -func ScalarMult(dst, in, base *[32]byte) { - scalarMult(dst, in, base) -} - -// ScalarBaseMult sets dst to the product in*base where dst and base are the x -// coordinates of group points, base is the standard generator and all values -// are in little-endian form. 
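// A hedged usage sketch of this package's public API, a basic X25519-style
// exchange; peerPub and the crypto/rand call are illustrative assumptions:
//
//	var priv, pub, shared [32]byte
//	if _, err := rand.Read(priv[:]); err != nil {
//		panic(err)
//	}
//	curve25519.ScalarBaseMult(&pub, &priv)          // pub = priv * base point
//	curve25519.ScalarMult(&shared, &priv, &peerPub) // shared DH secret
//
// Clamping (e[0] &= 248; e[31] = e[31]&127 | 64) happens inside scalarMult
// above, so callers may pass raw random bytes as the private scalar.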
-func ScalarBaseMult(dst, in *[32]byte) { - ScalarMult(dst, in, &basePoint) -} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s deleted file mode 100644 index 3908161..0000000 --- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s deleted file mode 100644 index 9e9040b..0000000 --- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s +++ /dev/null @@ -1,1377 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
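// ladderstep below works in the radix-2^51 representation used by this
// package's amd64 path: a field element is five uint64 limbs l0..l4 with
// value l0 + 2^51*l1 + 2^102*l2 + 2^153*l3 + 2^204*l4, and REDMASK51 =
// 2^51 - 1 (from the deleted const_amd64.h above) masks a limb after a
// carry. The _2P0 and _2P1234 constants are the limbs of 2p = 2*(2^255-19),
// namely 2^52-38 and 2^52-2; they are added before each subtraction so
// limbs never underflow. A minimal Go sketch of that biased subtraction,
// mirroring the ADDQ ·_2P0/·_2P1234 then SUBQ sequences below
// (hypothetical helper):
func feSub51(dst, a, b *[5]uint64) {
	dst[0] = a[0] + 0xFFFFFFFFFFFDA - b[0] // a + 2p limb 0, minus b
	for i := 1; i < 5; i++ {
		dst[i] = a[i] + 0xFFFFFFFFFFFFE - b[i] // a + 2p limbs 1..4, minus b
	}
}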
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$296-8 - MOVQ inout+0(FP),DI - - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,0(SP) - MOVQ DX,8(SP) - MOVQ CX,16(SP) - MOVQ R8,24(SP) - MOVQ R9,32(SP) - MOVQ AX,40(SP) - MOVQ R10,48(SP) - MOVQ R11,56(SP) - MOVQ R12,64(SP) - MOVQ R13,72(SP) - MOVQ 40(SP),AX - MULQ 40(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 48(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 48(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(SP) - MOVQ R8,88(SP) - MOVQ R9,96(SP) - MOVQ AX,104(SP) - MOVQ R10,112(SP) - MOVQ 0(SP),AX - MULQ 0(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 8(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 
24(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 32(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(SP) - MOVQ R8,128(SP) - MOVQ R9,136(SP) - MOVQ AX,144(SP) - MOVQ R10,152(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 80(SP),SI - SUBQ 88(SP),DX - SUBQ 96(SP),CX - SUBQ 104(SP),R8 - SUBQ 112(SP),R9 - MOVQ SI,160(SP) - MOVQ DX,168(SP) - MOVQ CX,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,200(SP) - MOVQ DX,208(SP) - MOVQ CX,216(SP) - MOVQ R8,224(SP) - MOVQ R9,232(SP) - MOVQ AX,240(SP) - MOVQ R10,248(SP) - MOVQ R11,256(SP) - MOVQ R12,264(SP) - MOVQ R13,272(SP) - MOVQ 224(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,280(SP) - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 232(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,288(SP) - MULQ 48(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 40(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 200(SP),AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 200(SP),AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 200(SP),AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 208(SP),AX - MULQ 40(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 208(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),AX - MULQ 40(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 216(SP),AX - MULQ 48(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 216(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 224(SP),AX - MULQ 40(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 224(SP),AX - MULQ 48(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 280(SP),AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 232(SP),AX - MULQ 40(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 288(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 288(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - 
SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(SP) - MOVQ R8,48(SP) - MOVQ R9,56(SP) - MOVQ AX,64(SP) - MOVQ R10,72(SP) - MOVQ 264(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,200(SP) - MULQ 16(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,208(SP) - MULQ 8(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 0(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 240(SP),AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 240(SP),AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 248(SP),AX - MULQ 0(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 248(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 248(SP),AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 248(SP),AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 248(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 0(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 256(SP),AX - MULQ 8(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 256(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 0(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 8(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 200(SP),AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 0(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),AX - MULQ 16(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 24(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 40(SP),SI - ADDQ 48(SP),R8 - ADDQ 56(SP),R9 - ADDQ 64(SP),AX - ADDQ 72(SP),R10 - SUBQ 40(SP),DX - SUBQ 48(SP),CX - SUBQ 56(SP),R11 - SUBQ 64(SP),R12 - SUBQ 72(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ 
DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - 
MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 144(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 152(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 88(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(SP),AX - MULQ 96(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(SP),AX - MULQ 104(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(SP),AX - MULQ 112(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 128(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 136(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 136(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(SP),AX - MULQ 80(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 144(SP),AX - MULQ 88(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 104(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 112(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - 
ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 160(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 168(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 176(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 184(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 192(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 80(SP),SI - ADDQ 88(SP),CX - ADDQ 96(SP),R8 - ADDQ 104(SP),R9 - ADDQ 112(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 176(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 168(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 176(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 184(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 192(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 176(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 184(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 176(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 160(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 168(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 184(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 192(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 176(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 184(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 192(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go deleted file mode 100644 
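The ladderstep routine above repeats one reduction idiom throughout: the SHLQ $13 / ANDQ REDMASK51 pairs split each 128-bit limb product into a 51-bit limb plus a carry (13 = 64 - 51, and REDMASK51 is 2^51 - 1 from const_amd64.h), and the carry off the top limb re-enters at the bottom multiplied by 19, since 2^255 ≡ 19 (mod 2^255 - 19). A minimal Go sketch of that carry chain, as an illustrative rendering rather than the exact instruction scheduling the assembly uses:

```go
package main

import "fmt"

// reduce folds limbs that have grown past 51 bits back into the
// five-limb, 51-bits-per-limb representation mod p = 2^255 - 19.
// It mirrors the SHRQ $51 / ANDQ REDMASK51 / IMUL3Q $19 chain in
// the deleted assembly, though not its exact scheduling.
func reduce(r *[5]uint64) {
	const mask51 = (1 << 51) - 1 // REDMASK51 in const_amd64.h
	var c uint64
	for i := 0; i < 5; i++ {
		r[i] += c
		c = r[i] >> 51
		r[i] &= mask51
	}
	r[0] += 19 * c // carry out of limb 4 wraps around: 2^255 ≡ 19 (mod p)
}

func main() {
	x := [5]uint64{1 << 52, 0, 0, 0, 0}
	reduce(&x)
	fmt.Println(x) // [0 2 0 0 0]
}
```

The same pattern appears verbatim in mul_amd64.s and square_amd64.s further down.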
index 5822bd5..0000000 --- a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. -func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. 
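scalarMult in the file above clamps the scalar before running the Montgomery ladder: clear the three low bits, clear bit 255, set bit 254. A standalone sketch of just that conditioning step, with the constants taken directly from the deleted code:

```go
package main

import "fmt"

// clamp applies the X25519 scalar conditioning performed at the top of
// scalarMult in the deleted file: the low three bits are cleared so the
// scalar is a multiple of the cofactor 8, the top bit is cleared, and
// bit 254 is set so the ladder always runs a fixed number of steps.
func clamp(e *[32]byte) {
	e[0] &= 248
	e[31] &= 127
	e[31] |= 64
}

func main() {
	var e [32]byte
	for i := range e {
		e[i] = 0xff
	}
	clamp(&e)
	fmt.Printf("%x %x\n", e[0], e[31]) // f8 7f
}
```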
-func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. -func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s deleted file mode 100644 index 5ce80a2..0000000 --- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
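invert above computes x^(p-2) mod p through a fixed chain of squarings and multiplications; its final comment, /* 2^255 - 21 */, is exactly p - 2 for p = 2^255 - 19. A quick math/big check, offered as an illustration, that Fermat's little theorem gives the same answer as the generic modular inverse:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field prime used by all of the deleted files.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	// Fermat's little theorem: x^(p-2) ≡ x^-1 (mod p) for x != 0.
	// invert's addition chain computes exactly this power;
	// Exp and ModInverse agree with it.
	x := big.NewInt(123456789)
	viaFermat := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
	viaEuclid := new(big.Int).ModInverse(x, p)
	fmt.Println(viaFermat.Cmp(viaEuclid) == 0) // true
}
```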
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R9:R8 - ANDQ SI,R8 - SHLQ $13,R11:R10 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ SI,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BP:BX - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s deleted file mode 100644 index 12f7373..0000000 --- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
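Both mul above and square below pre-multiply high limbs by 19 (IMUL3Q $19, or $38 when a cross term is also being doubled) before the schoolbook multiply. This works because the limbs carry weights 2^0, 2^51, ..., 2^204, so any partial product landing at weight 2^255 or above can be folded back to the bottom using 2^255 ≡ 19 (mod p). A one-line math/big check of the identity:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19

	// The IMUL3Q $19 folding relies on 2^255 ≡ 19 (mod p): a limb
	// product overflowing past bit 255 re-enters at the bottom times 19.
	two255 := new(big.Int).Lsh(big.NewInt(1), 255)
	fmt.Println(new(big.Int).Mod(two255, p)) // 19
}
```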
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,R8:CX - ANDQ SI,CX - SHLQ $13,R10:R9 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R12:R11 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R14:R13 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,BX:R15 - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index 9b07acc..0000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05. -package ed25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -import ( - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "crypto/subtle" - "errors" - "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 -) - -// PublicKey is the type of Ed25519 public keys. -type PublicKey []byte - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. 
-func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { - if rand == nil { - rand = cryptorand.Reader - } - - privateKey = make([]byte, PrivateKeySize) - publicKey = make([]byte, PublicKeySize) - _, err = io.ReadFull(rand, privateKey[:32]) - if err != nil { - return nil, nil, err - } - - digest := sha512.Sum512(privateKey[:32]) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - copy(privateKey[32:], publicKeyBytes[:]) - copy(publicKey, publicKeyBytes[:]) - - return publicKey, privateKey, nil -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. 
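The deleted ed25519.go is no longer vendored but keeps the same exported API, presumably now fetched through go.mod. A minimal round-trip through GenerateKey, Sign, and Verify, using only the function signatures shown in the file above:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg) // panics if len(priv) != PrivateKeySize
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```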
-func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) - - var checkR [32]byte - R.ToBytes(&checkR) - return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086..0000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
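const.go above stores the Edwards curve constant d as ten signed limbs. For Ed25519, d = -121665/121666 mod p; 121666 is the same constant that appears as ·_121666_213 in the deleted ladder assembly. A math/big sketch that computes d, offered as an illustration of what the FieldElement literal encodes:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19

	// d = -121665/121666 mod p, the curve constant encoded limb-wise
	// by the FieldElement literal for d in the deleted const.go.
	d := new(big.Int).ModInverse(big.NewInt(121666), p)
	d.Mul(d, big.NewInt(-121665))
	d.Mod(d, p)
	fmt.Printf("%x\n", d)
}
```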
-var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, 
- { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index 5f8b994..0000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1771 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^(-255))r<r+19(2^(-255))2^255<=2^255-1. -// -// Write x=r+19(2^(-255))r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int64 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
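The strategy notes above reduce to one identity: limb i of a FieldElement sits at bit position ceil(25.5*i), and any cross term whose combined weight reaches 2^255 wraps around scaled by 19, since 2^255 ≡ 19 in GF(2^255-19); the doubled odd limbs (f1_2, f3_2, ...) absorb the extra factor of 2 that two half-weight positions produce. A minimal big.Int sketch of the wrap-around factor, for illustration only (not part of the deleted file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^255 - 19, the field the FieldElement limbs represent.
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

	// 2^255 mod p == 19: a product term that overflows the 255-bit field
	// folds back to the low end multiplied by 19, which is exactly what
	// the g1_19 ... g9_19 precomputations in FeMul implement.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)) // prints 19
}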
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
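FeSquare below feeds the same FeCombine carry chain, and FeInvert further below strings such squarings into a Fermat inversion: it raises z to p-2 = 2^255-21 along a fixed square-and-multiply chain, so no gcd and no data-dependent branches are needed. A big.Int cross-check of that exponent, sketched with an arbitrary test value (123456789 is mine, not from the source):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(123456789) // arbitrary nonzero field element

	// Fermat's little theorem: z^(p-2) is the multiplicative inverse mod p,
	// the same value FeInvert's fixed chain of squarings computes.
	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	check := new(big.Int).Mul(z, inv)
	fmt.Println(check.Mod(check, p)) // prints 1
}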
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise.
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
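The reduction steps in ScMulAdd and ScReduce below (s11 += s23 * 666643 and friends) work because those six constants are the signed radix-2^21 limbs of -delta, where l = 2^252 + delta; eliminating a high limb therefore just adds -delta times the limb's weight back into lower positions. A big.Int consistency check of that relation, sketched for illustration (variable names are mine):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// delta is the low part of the group order l = 2^252 + delta.
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)

	// The constants used below, read as signed radix-2^21 digits.
	coeffs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	sum := new(big.Int)
	for i, c := range coeffs {
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(c), uint(21*i)))
	}

	// Expect sum == -delta, i.e. 2^252 ≡ sum (mod l).
	fmt.Println(sum.Cmp(new(big.Int).Neg(delta)) == 0) // prints true
}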
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index 592d186..0000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
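crc24 above is small enough to exercise on its own; the armor trailer is just these three CRC bytes, base64-encoded behind a '=' marker. A self-contained sketch reusing the constants from the deleted file (the sample input is mine):

package main

import (
	"encoding/base64"
	"fmt"
)

// crc24 is a standalone copy of the routine above (RFC 4880, section 6.1).
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= 0x1864cfb // crc24Poly
			}
		}
	}
	return crc
}

func main() {
	sum := crc24(0xb704ce, []byte("hello")) & 0xffffff // crc24Init, crc24Mask
	raw := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
	fmt.Println("=" + base64.StdEncoding.EncodeToString(raw)) // the checksum line
}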
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
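lineReader above recognizes the checksum line by its shape: '=' plus four base64 characters holding the three CRC bytes. A small decoding sketch mirroring that parsing (the sample line is hypothetical):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	line := []byte("=wMVG") // hypothetical checksum line
	var expected [3]byte
	n, err := base64.StdEncoding.Decode(expected[:], line[1:])
	if err != nil || n != 3 {
		panic("corrupt checksum line")
	}
	// Assemble the 24-bit CRC exactly as lineReader.Read does.
	crc := uint32(expected[0])<<16 | uint32(expected[1])<<8 | uint32(expected[2])
	fmt.Printf("expected CRC-24: %06x\n", crc)
}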
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582..0000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e38..0000000 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 73f4fe3..0000000 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
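A matching sketch for the encoding side; the block type and header here are arbitrary demo values:

package main

import (
	"os"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	// Block type and headers are caller-chosen strings.
	w, err := armor.Encode(os.Stdout, "PGP MESSAGE", map[string]string{"Comment": "demo"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("arbitrary binary payload")); err != nil {
		panic(err)
	}
	// Close flushes the final base64 line, then writes the "=" CRC24
	// line and the END trailer.
	if err := w.Close(); err != nil {
		panic(err)
	}
}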
- -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - s.ModInverse(s, priv.P) - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. 
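A round-trip sketch for this package; the 512-bit prime and g = 2 are toy parameters for the demo only, far below any safe size:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"golang.org/x/crypto/openpgp/elgamal"
)

func main() {
	// Toy parameters: a random 512-bit prime and g = 2. Real keys need
	// far larger, properly generated group parameters.
	p, err := rand.Prime(rand.Reader, 512)
	if err != nil {
		panic(err)
	}
	g := big.NewInt(2)
	x, err := rand.Int(rand.Reader, p)
	if err != nil {
		panic(err)
	}
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("session key"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", msg) // prints "session key"
}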
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index eb0550b..0000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index 68b14c6..0000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. 
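Callers typically branch on these concrete error types; a small sketch (the classify helper is invented for the demo):

package main

import (
	"fmt"

	pgperrors "golang.org/x/crypto/openpgp/errors"
)

// classify is a demo helper showing the usual caller-side triage.
func classify(err error) string {
	switch err.(type) {
	case pgperrors.StructuralError:
		return "malformed data"
	case pgperrors.UnsupportedError:
		return "valid but unimplemented feature"
	case pgperrors.UnknownPacketTypeError:
		return "unknown packet tag"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(pgperrors.UnsupportedError("public key type")))
	fmt.Println(classify(pgperrors.UnknownPacketTypeError(63)))
}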
-type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. 
-func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. 
Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } else { - e.PrimaryKey = &e.PrivateKey.PublicKey - } - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var current *Identity - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - current = new(Identity) - current.Name = pkt.Id - current.UserId = pkt - e.Identities[pkt.Id] = current - - for { - p, err = packets.Next() - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } else if err != nil { - return nil, err - } - - sig, ok := p.(*packet.Signature) - if !ok { - return nil, errors.StructuralError("user ID packet not followed by self-signature") - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - current.SelfSignature = sig - break - } - current.Signatures = append(current.Signatures, sig) - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). 
- } else if current == nil { - return nil, errors.StructuralError("signature packet found before user id packet") - } else { - current.Signatures = append(current.Signatures, pkt) - } - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - p, err := packets.Next() - if err == io.EOF { - return io.ErrUnexpectedEOF - } - if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - var ok bool - subKey.Sig, ok = p.(*packet.Signature) - if !ok { - return errors.StructuralError("subkey packet not followed by signature") - } - if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig) - if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - e.Subkeys = append(e.Subkeys, subKey) - return nil -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
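A usage sketch for NewEntity, assuming only the API shown here; the RSABits override is an arbitrary demo value:

package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// A nil *packet.Config would select the defaults documented above;
	// RSABits is set here only to show the override (and makes key
	// generation noticeably slower).
	cfg := &packet.Config{RSABits: 3072}
	e, err := openpgp.NewEntity("Alice Example", "demo", "alice@example.com", cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("primary key:", e.PrimaryKey.KeyIdString())
	fmt.Println("subkeys:", len(e.Subkeys)) // one encryption subkey
}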
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - currentTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Name, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, to -// the given Writer. For now, it must only be used on an Entity returned from -// NewEntity. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w. (No private -// key material will be output). 
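A sketch that serializes a fresh key as an armored public block and reads it back; note the SerializePrivate pass, which signs the self-signatures that Serialize then emits:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	e, err := openpgp.NewEntity("Bob Example", "", "bob@example.com", nil)
	if err != nil {
		panic(err)
	}
	// NewEntity leaves the self-signatures unsigned; SerializePrivate
	// signs them as a side effect, so run it once before Serialize.
	if err := e.SerializePrivate(ioutil.Discard, nil); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := e.Serialize(w); err != nil {
		panic(err)
	}
	w.Close()

	// Round-trip: ReadArmoredKeyRing accepts exactly this block type.
	el, err := openpgp.ReadArmoredKeyRing(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("entities:", len(el), "identities:", len(el[0].Identities))
}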
-func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5c..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. -type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. 
- Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. 
-	CompressionConfig *CompressionConfig
-	// S2KCount is only used for symmetric encryption. It
-	// determines the strength of the passphrase stretching when
-	// the said passphrase is hashed to produce a key. S2KCount
-	// should be between 1024 and 65011712, inclusive. If Config
-	// is nil or S2KCount is 0, the value 65536 is used. Not all
-	// values in the above range can be represented. S2KCount will
-	// be rounded up to the next representable value if it cannot
-	// be encoded exactly. When set, it is strongly encouraged to
-	// use a value that is at least 65536. See RFC 4880 Section
-	// 3.7.1.3.
-	S2KCount int
-	// RSABits is the number of bits in new RSA keys made with NewEntity.
-	// If zero, then 2048 bit keys are created.
-	RSABits int
-}
-
-func (c *Config) Random() io.Reader {
-	if c == nil || c.Rand == nil {
-		return rand.Reader
-	}
-	return c.Rand
-}
-
-func (c *Config) Hash() crypto.Hash {
-	if c == nil || uint(c.DefaultHash) == 0 {
-		return crypto.SHA256
-	}
-	return c.DefaultHash
-}
-
-func (c *Config) Cipher() CipherFunction {
-	if c == nil || uint8(c.DefaultCipher) == 0 {
-		return CipherAES128
-	}
-	return c.DefaultCipher
-}
-
-func (c *Config) Now() time.Time {
-	if c == nil || c.Time == nil {
-		return time.Now()
-	}
-	return c.Time()
-}
-
-func (c *Config) Compression() CompressionAlgo {
-	if c == nil {
-		return CompressionNone
-	}
-	return c.DefaultCompressionAlgo
-}
-
-func (c *Config) PasswordHashIterations() int {
-	if c == nil || c.S2KCount == 0 {
-		return 0
-	}
-	return c.S2KCount
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
deleted file mode 100644
index 266840d..0000000
--- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
-	"crypto/rsa"
-	"encoding/binary"
-	"io"
-	"math/big"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/elgamal"
-	"golang.org/x/crypto/openpgp/errors"
-)
-
-const encryptedKeyVersion = 3
-
-// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
-// section 5.1.
-type EncryptedKey struct {
-	KeyId      uint64
-	Algo       PublicKeyAlgorithm
-	CipherFunc CipherFunction // only valid after a successful Decrypt
-	Key        []byte         // only valid after a successful Decrypt
-
-	encryptedMPI1, encryptedMPI2 parsedMPI
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
-	var buf [10]byte
-	_, err = readFull(r, buf[:])
-	if err != nil {
-		return
-	}
-	if buf[0] != encryptedKeyVersion {
-		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
-	}
-	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
-	e.Algo = PublicKeyAlgorithm(buf[9])
-	switch e.Algo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
-		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
-	case PubKeyAlgoElGamal:
-		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
-		if err != nil {
-			return
-		}
-		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
-	}
-	_, err = consumeAll(r)
-	return
-}
-
-func checksumKeyMaterial(key []byte) uint16 {
-	var checksum uint16
-	for _, v := range key {
-		checksum += uint16(v)
-	}
-	return checksum
-}
-
-// Decrypt decrypts an encrypted session key with the given private key. The
-// private key must have been decrypted first.
-// If config is nil, sensible defaults will be used.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
-	var err error
-	var b []byte
-
-	// TODO(agl): use session key decryption routines here to avoid
-	// padding oracle attacks.
-	switch priv.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
-		b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
-	case PubKeyAlgoElGamal:
-		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
-		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
-		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
-	default:
-		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
-	}
-
-	if err != nil {
-		return err
-	}
-
-	e.CipherFunc = CipherFunction(b[0])
-	e.Key = b[1 : len(b)-2]
-	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
-	checksum := checksumKeyMaterial(e.Key)
-	if checksum != expectedChecksum {
-		return errors.StructuralError("EncryptedKey checksum incorrect")
-	}
-
-	return nil
-}
-
-// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error {
-	var mpiLen int
-	switch e.Algo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
-		mpiLen = 2 + len(e.encryptedMPI1.bytes)
-	case PubKeyAlgoElGamal:
-		mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
-	default:
-		return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
-	}
-
-	serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
-
-	w.Write([]byte{encryptedKeyVersion})
-	binary.Write(w, binary.BigEndian, e.KeyId)
-	w.Write([]byte{byte(e.Algo)})
-
-	switch e.Algo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
-		writeMPIs(w, e.encryptedMPI1)
-	case PubKeyAlgoElGamal:
-		writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
-	default:
-		panic("internal error")
-	}
-
-	return nil
-}
-
-// SerializeEncryptedKey serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// If config is nil, sensible defaults will be used.
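A round-trip sketch for the encrypted-key packet using the functions above and below; the 16-byte session key is a demo value:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	e, err := openpgp.NewEntity("Carol Example", "", "carol@example.com", nil)
	if err != nil {
		panic(err)
	}
	sub := e.Subkeys[0] // the encryption subkey NewEntity creates

	sessionKey := []byte("0123456789abcdef") // 16 demo bytes for AES-128
	var buf bytes.Buffer
	if err := packet.SerializeEncryptedKey(&buf, sub.PublicKey, packet.CipherAES128, sessionKey, nil); err != nil {
		panic(err)
	}

	// Parse the packet back and decrypt with the matching private key.
	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	ek := p.(*packet.EncryptedKey)
	if err := ek.Decrypt(sub.PrivateKey, nil); err != nil {
		panic(err)
	}
	fmt.Printf("recovered %d-byte session key\n", len(ek.Key))
}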
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file mode 100644 index 1a9ec6e..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. 
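A usage sketch for SerializeLiteral and packet.Read; the nopCloser wrapper is demo scaffolding, not part of the package:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts bytes.Buffer to the io.WriteCloser parameter of
// SerializeLiteral; it is demo scaffolding only.
type nopCloser struct{ *bytes.Buffer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.bin", 0)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("literal payload")); err != nil {
		panic(err)
	}
	w.Close() // terminates the partial-length stream

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	lit := p.(*packet.LiteralData)
	body, _ := ioutil.ReadAll(lit.Body)
	fmt.Printf("file=%q binary=%v body=%q\n", lit.FileName, lit.IsBinary, body)
}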
-func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
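A round-trip sketch for the OCFB stream defined below, using a random AES-128 key; the plaintext is a demo value:

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	seed := make([]byte, block.BlockSize())
	if _, err := rand.Read(seed); err != nil {
		panic(err)
	}

	enc, prefix := packet.NewOCFBEncrypter(block, seed, packet.OCFBResync)
	msg := []byte("attack at dawn")
	enc.XORKeyStream(msg, msg) // encrypt in place

	// The decrypter consumes the blockSize+2 byte prefix and returns
	// nil when the two quick-check bytes do not match (wrong key).
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		panic("quick check failed")
	}
	dec.XORKeyStream(msg, msg) // decrypt in place
	fmt.Printf("%s\n", msg)    // prints "attack at dawn"
}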
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 1713503..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 456d807..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
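A sketch that walks a packet stream opaquely; the keyring being walked is generated on the fly just for the demo:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Any packet stream works; a throwaway key provides one here.
	e, err := openpgp.NewEntity("Dave Example", "", "dave@example.com", nil)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := e.SerializePrivate(&buf, nil); err != nil {
		panic(err)
	}

	or := packet.NewOpaqueReader(&buf)
	for {
		op, err := or.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("tag=%d, %d content bytes\n", op.Tag, len(op.Contents))
	}
}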
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 3eded93..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" - "io" - "math/big" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
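The two-octet rule can be checked by hand; decodeTwoOctet is a hypothetical restatement kept only to work one example, with the package's readLength below remaining the authoritative implementation:

package main

import "fmt"

// decodeTwoOctet restates the two-octet case of the RFC 4880 length
// encoding (192 <= first octet < 224); demo helper, not library code.
func decodeTwoOctet(b0, b1 byte) int64 {
	return int64(b0-192)<<8 + int64(b1) + 192
}

func main() {
	// 0xC5 0xFB: (0xC5-192)<<8 + 0xFB + 192 = 5*256 + 251 + 192 = 1723.
	fmt.Println(decodeTwoOctet(0xC5, 0xFB)) // 1723
}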
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - for len(p) > 0 { - for power := uint(14); power < 32; power-- { - l := 1 << power - if len(p) >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(p[:l]) - n += m - if err != nil { - return - } - p = p[l:] - break - } - } - } - return -} - -func (w *partialLengthWriter) Close() error { - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
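[Editor's note, not part of the patch] The deleted serializeHeader is readHeader's inverse for definite lengths: the tag octet is 0x80 (always set) | 0x40 (new format) | packet type, followed by the body length in the encoding sketched earlier. A hedged illustration of the emitted bytes; the helper name and example values are mine:

package main

import "fmt"

// header builds a new-format packet header for a definite body length,
// mirroring the three branches of the deleted serializeHeader.
func header(ptype byte, length int) []byte {
	switch {
	case length < 192:
		return []byte{0xc0 | ptype, byte(length)}
	case length < 8384:
		length -= 192
		return []byte{0xc0 | ptype, 192 + byte(length>>8), byte(length)}
	default:
		return []byte{0xc0 | ptype, 255,
			byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
	}
}

func main() {
	fmt.Printf("% x\n", header(11, 100)) // cb 64: 100-byte literal data packet (type 11)
	fmt.Printf("% x\n", header(2, 1723)) // c2 c5 fb: 1723-byte signature packet (type 2)
}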
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
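[Editor's note, not part of the patch] Read and the packetType table above are the package's single dispatch point; consumers normally walk a stream through the Reader type deleted further below. A usage sketch against the golang.org/x/crypto/openpgp/packet API as vendored here; the input file name is hypothetical and the stream must be binary (not ASCII-armored):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("key.pgp") // hypothetical binary OpenPGP file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := packet.NewReader(f)
	for {
		p, err := r.Next() // io.EOF once the stream is exhausted
		if err != nil {
			break
		}
		switch pkt := p.(type) {
		case *packet.PublicKey:
			fmt.Printf("public key %s (algo %d)\n", pkt.KeyIdString(), pkt.PubKeyAlgo)
		case *packet.Signature:
			fmt.Printf("signature, type %d\n", pkt.SigType)
		default:
			fmt.Printf("packet %T\n", p)
		}
	}
}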
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - return -} - -// mpiLength returns the length of the given *big.Int when serialized as an -// MPI. -func mpiLength(n *big.Int) (mpiLengthInBytes int) { - mpiLengthInBytes = 2 /* MPI length */ - mpiLengthInBytes += (n.BitLen() + 7) / 8 - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 34734cc..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
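[Editor's note, not part of the patch] The readMPI/writeMPI pair above uses the OpenPGP multiprecision-integer wire form: a two-octet big-endian bit count, then exactly ceil(bits/8) magnitude octets, preserved so values reserialize byte-for-byte. A self-contained sketch; the helper name is mine, and 511 is the worked example from RFC 4880, section 3.2:

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// writeMPI serializes n as an OpenPGP MPI: bit length, then magnitude bytes.
func writeMPI(w *bytes.Buffer, n *big.Int) {
	bitLen := uint16(n.BitLen())
	w.Write([]byte{byte(bitLen >> 8), byte(bitLen)})
	w.Write(n.Bytes())
}

func main() {
	var buf bytes.Buffer
	writeMPI(&buf, big.NewInt(511))  // 9 bits, value 0x01ff
	fmt.Printf("% x\n", buf.Bytes()) // 00 09 01 ff
}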
- -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - switch pubkey := signer.Public().(type) { - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) - pk.PubKeyAlgo = PubKeyAlgoRSASignOnly - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = ioutil.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) 
- if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. 
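[Editor's note, not part of the patch] mod64kHash above is the legacy 16-bit checksum that RFC 4880, section 5.5.3 appends to secret-key material when the stronger SHA-1 protection (s2k type 254) is not in use: simply the sum of all octets modulo 65536. A tiny worked example with values of my own choosing:

package main

import "fmt"

func mod64kHash(d []byte) uint16 {
	var h uint16
	for _, b := range d {
		h += uint16(b) // wraps naturally at 2^16
	}
	return h
}

func main() {
	sum := mod64kHash([]byte{0x01, 0xff, 0x80})            // 1 + 255 + 128 = 384
	fmt.Printf("% x\n", []byte{byte(sum >> 8), byte(sum)}) // 01 80
}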
-func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = &ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go 
b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index ead2623..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,748 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
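[Editor's note, not part of the patch] The constructors above (NewRSAPublicKey and friends) wrap standard-library keys in the packet types, capturing the creation time that later feeds the fingerprint. A usage sketch against the API as vendored here; the 2048-bit size is an arbitrary choice of mine:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pk := packet.NewRSAPublicKey(time.Now(), &priv.PublicKey)
	fmt.Println("key id:", pk.KeyIdString()) // e.g. "6C7EE1B8621CC013"
}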
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
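[Editor's note, not part of the patch] setFingerPrintAndKeyId above implements the v4 rule of RFC 4880, section 12.2: the fingerprint is SHA-1 over 0x99, a two-octet length, and the serialized key material, and the 64-bit key id is the low 8 bytes of that digest. Reconstructed in isolation; the function name and the truncated key bytes are mine:

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

// fingerprintV4 hashes the signature prefix (0x99 plus length) and the
// serialized key body, then derives the key id from the digest's tail.
func fingerprintV4(keyMaterial []byte) ([20]byte, uint64) {
	h := sha1.New()
	h.Write([]byte{0x99, byte(len(keyMaterial) >> 8), byte(len(keyMaterial))})
	h.Write(keyMaterial)
	var fp [20]byte
	copy(fp[:], h.Sum(nil))
	return fp, binary.BigEndian.Uint64(fp[12:20])
}

func main() {
	fp, id := fingerprintV4([]byte{4, 0, 0, 0, 0, 1}) // hypothetical key body
	fmt.Printf("fingerprint %X\nkey id      %016X\n", fp, id)
}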
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
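[Editor's note, not part of the patch] One easily missed detail in VerifySignature above: for DSA the digest is truncated to the byte length of the subgroup order q before verification, per FIPS 186-3, section 4.6. Isolated as a sketch; the names and sizes are mine:

package main

import "fmt"

// truncateForDSA keeps only the leftmost bytes of a digest when it is
// longer than the subgroup order allows.
func truncateForDSA(hash []byte, qBitLen int) []byte {
	subgroupSize := (qBitLen + 7) / 8
	if len(hash) > subgroupSize {
		return hash[:subgroupSize]
	}
	return hash
}

func main() {
	digest := make([]byte, 32)                    // e.g. a SHA-256 digest
	fmt.Println(len(truncateForDSA(digest, 160))) // 20 with a 160-bit q
}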
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
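[Editor's note, not part of the patch] The v3 identity rules implemented by setFingerPrintAndKeyId above are notably weaker than v4: MD5 over the raw modulus and exponent bytes with no length framing, and a key id taken from the low 64 bits of the modulus itself, which is why such keys are kept parse-only here. A sketch; the function name and sample bytes are mine:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// v3Identity derives the MD5 fingerprint and modulus-based key id of a
// version 3 RSA key from its raw n and e octets.
func v3Identity(n, e []byte) ([16]byte, uint64) {
	fp := md5.Sum(append(append([]byte{}, n...), e...))
	return fp, binary.BigEndian.Uint64(n[len(n)-8:])
}

func main() {
	n := []byte{0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4, 5, 6, 7, 8} // hypothetical modulus
	fp, id := v3Identity(n, []byte{1, 0, 1})
	fmt.Printf("md5 fingerprint %X, key id %016X\n", fp, id)
}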
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c6..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index 6ce0cbe..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
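[Editor's note, not part of the patch] Signature.parse above pre-builds HashSuffix so verification only has to hash the signed data followed by this suffix; its final six octets are the v4 trailer of RFC 4880, section 5.2.4: version, 0xff, then the big-endian byte count of the hashed portion. Reconstructed in isolation; the helper name is mine:

package main

import "fmt"

// v4Trailer returns the six-octet trailer for a hashed portion of
// hashedLen bytes (version through hashed subpackets, inclusive).
func v4Trailer(hashedLen int) []byte {
	return []byte{4, 0xff,
		byte(hashedLen >> 24), byte(hashedLen >> 16),
		byte(hashedLen >> 8), byte(hashedLen)}
}

func main() {
	// Six fixed header octets plus, say, 21 octets of hashed subpackets.
	fmt.Printf("% x\n", v4Trailer(6+21)) // 04 ff 00 00 00 1b
}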
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
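// Worked example (not from the original file) of the subpacket length
// encoding that serializeSubpacketLength implements below: one octet for
// lengths under 192, two octets for lengths under 16320, five otherwise.
package main

import "fmt"

func encodeSubpacketLength(n int) []byte {
	switch {
	case n < 192:
		return []byte{byte(n)}
	case n < 16320:
		n -= 192
		return []byte{byte((n >> 8) + 192), byte(n)}
	default:
		return []byte{255, byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n)}
	}
}

func main() {
	fmt.Println(encodeSubpacketLength(100))    // [100]
	fmt.Println(encodeSubpacketLength(1000))   // [195 40]
	fmt.Println(encodeSubpacketLength(100000)) // [255 0 1 134 160]
}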
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
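// Hedged usage sketch for Sign and Serialize (not from the original file;
// the helper name and the assumption that signingKey is already decrypted
// are illustrative). The hash handed to Sign must already contain the data
// being signed; Sign only appends the v4 hash suffix and computes the MPIs.
package main

import (
	"crypto"
	_ "crypto/sha256"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

func signBlob(w io.Writer, signingKey *packet.PrivateKey, data []byte, cfg *packet.Config) error {
	sig := &packet.Signature{
		SigType:      packet.SigTypeBinary,
		PubKeyAlgo:   signingKey.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: cfg.Now(), // nil-safe: a nil *Config falls back to time.Now()
		IssuerKeyId:  &signingKey.KeyId,
	}
	h := sig.Hash.New()
	h.Write(data)
	if err := sig.Sign(h, signingKey, cfg); err != nil {
		return err
	}
	return sig.Serialize(w)
}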
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
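// Small illustration (not from the original file; the constants are
// restated for self-containment): the key-flags subpacket emitted below is
// a single-octet bitmask, so a certify+sign key carries 0x03.
package main

import "fmt"

const (
	KeyFlagCertify = 1 << iota // 0x01
	KeyFlagSign                // 0x02
	KeyFlagEncryptCommunications
	KeyFlagEncryptStorage
)

func main() {
	flags := byte(KeyFlagCertify | KeyFlagSign)
	fmt.Printf("%#x\n", flags) // 0x3
}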
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff88..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
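// Hedged sketch (not from the original file) of the passphrase flow this
// packet enables; the raw plaintext stands in for the inner OpenPGP packets
// a real message would contain (the higher-level openpgp package wires the
// full pipeline together).
package main

import (
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

func encryptWithPassphrase(w io.Writer, passphrase, plaintext []byte) error {
	// Writes the symmetric-key packet and returns the derived session key;
	// a nil config selects the defaults (AES-128, iterated+salted S2K).
	key, err := packet.SerializeSymmetricKeyEncrypted(w, passphrase, nil)
	if err != nil {
		return err
	}
	body, err := packet.SerializeSymmetricallyEncrypted(w, packet.CipherAES128, key, nil)
	if err != nil {
		return err
	}
	if _, err := body.Write(plaintext); err != nil {
		return err
	}
	return body.Close()
}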
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 6126030..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
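// Sketch (not from the original file) of the 22-octet MDC trailer layout
// that seMDCReader.Close above and seMDCWriter.Close below agree on: a
// new-format tag byte for packet type 19 (0xd3), a length octet of 20,
// then the SHA-1 digest. The tag and length octets are themselves hashed
// before the digest is finalised.
package main

import (
	"crypto/sha1"
	"crypto/subtle"
	"errors"
	"hash"
)

func checkMDCTrailer(h hash.Hash, trailer []byte) error {
	if len(trailer) != 2+sha1.Size || trailer[0] != 0xd3 || trailer[1] != sha1.Size {
		return errors.New("MDC packet not found")
	}
	h.Write(trailer[:2])
	if subtle.ConstantTimeCompare(h.Sum(nil), trailer[2:]) != 1 {
		return errors.New("hash mismatch")
	}
	return nil
}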
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index 96a2b38..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
- 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - sp.Serialize(&buf) - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// the user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go deleted file mode 100644 index d6bea7d..0000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "io/ioutil" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. 
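// Usage sketch (not from the original file; the name, comment, and address
// are made up for the example): the conventional rendering assembled below.
package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	uid := packet.NewUserId("Alice Example", "work", "alice@example.com")
	fmt.Println(uid.Id) // Alice Example (work) <alice@example.com>
}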
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 6ec664f..0000000 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. 
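// Hedged usage sketch (not from the original file; how the keyring is
// obtained is assumed): the verdict fields described below are only
// meaningful once UnverifiedBody has been read to EOF.
package main

import (
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp"
)

func readVerified(keyring openpgp.KeyRing, msg io.Reader) ([]byte, error) {
	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(md.UnverifiedBody) // drains to EOF
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, md.SignatureError
	}
	return body, nil
}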
-type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. 
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
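// Hedged sketch (not from the original file; the file-path parameters are
// assumptions): verifying a detached armored signature with
// CheckArmoredDetachedSignature, defined further below.
package main

import (
	"os"

	"golang.org/x/crypto/openpgp"
)

func verifyDetached(keyring openpgp.EntityList, signedPath, sigPath string) error {
	signed, err := os.Open(signedPath)
	if err != nil {
		return err
	}
	defer signed.Close()
	sig, err := os.Open(sigPath)
	if err != nil {
		return err
	}
	defer sig.Close()
	_, err = openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
	return err
}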
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 4b9a44c..0000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformatioms. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. 
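// Worked example (not from the original file; decodeCount is restated for
// self-containment) for the octet count encoding defined above:
// decodeCount(96) = (16+0) << (6+6) = 65536, the package default, while 255
// maps to the 65011712 maximum.
package main

import "fmt"

func decodeCount(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}

func main() {
	fmt.Println(decodeCount(96))  // 65536
	fmt.Println(decodeCount(255)) // 65011712
}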
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. 
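// Editor's note: a minimal sketch (not part of the patch) of the Serialize
// helper removed above, deriving a symmetric key from a passphrase. The key
// length and passphrase are hypothetical.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/s2k"
)

func main() {
	key := make([]byte, 32) // e.g. an AES-256 key
	var descriptor bytes.Buffer

	// Writes an iterated+salted S2K descriptor to the buffer and fills key
	// by stretching the passphrase; a nil *Config selects SHA-1 and an
	// encoded count of 96 (65536 iterations), per the code above.
	if err := s2k.Serialize(&descriptor, key, rand.Reader, []byte("correct horse"), nil); err != nil {
		panic(err)
	}
	fmt.Printf("descriptor: % x\nkey: % x\n", descriptor.Bytes(), key)
}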
-func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index 65a304c..0000000 --- a/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
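// Editor's note: a usage sketch (not part of the patch) for the
// detached-signing entry points removed above. The key file is hypothetical
// and assumed to hold an already-decrypted private key.
package main

import (
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	keyFile, _ := os.Open("secring.asc") // hypothetical armored private key
	entities, err := openpgp.ReadArmoredKeyRing(keyFile)
	if err != nil {
		panic(err)
	}
	signer := entities[0] // PrivateKey must already be decrypted

	message, _ := os.Open("release.tar.gz")
	out, _ := os.Create("release.tar.gz.asc")
	defer out.Close()

	// A nil *packet.Config selects the package defaults.
	if err := openpgp.ArmoredDetachSign(out, signer, message, nil); err != nil {
		panic(err)
	}
}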
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
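// Editor's note: a minimal sketch (not part of the patch) of the
// SymmetricallyEncrypt API removed above, the gpg -c analogue. Closing the
// returned WriteCloser finalises the message.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	var ciphertext bytes.Buffer

	// nil hints and nil config fall back to the documented defaults.
	plaintext, err := openpgp.SymmetricallyEncrypt(&ciphertext, []byte("passphrase"), nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := plaintext.Write([]byte("attack at dawn")); err != nil {
		panic(err)
	}
	if err := plaintext.Close(); err != nil { // must close to flush the literal packet
		panic(err)
	}
	fmt.Printf("%d bytes of OpenPGP ciphertext\n", ciphertext.Len())
}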
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. 
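// Editor's note: a usage sketch (not part of the patch) for the Encrypt API
// removed above. The cipher and hash actually used are negotiated by
// intersecting the candidate lists with each recipient's stated preferences,
// as implemented by intersectPreferences. The key file is hypothetical.
package main

import (
	"bytes"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, _ := os.Open("recipient.asc") // hypothetical armored public key
	recipients, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	var ciphertext bytes.Buffer
	// signed is nil here; passing a decrypted *Entity instead adds a
	// one-pass signature inside the encrypted payload.
	w, err := openpgp.Encrypt(&ciphertext, recipients, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("attack at dawn"))
	w.Close() // flushes the literal data and, when signing, the trailing signature
}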
- if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(encryptedData); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := encryptedData - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{encryptedData} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 6931b51..0000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive os.EOF. -func (b *buffer) eof() error { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() - return nil -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index b1f0220..0000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. 
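// Editor's note: the buffer type removed above is unexported; this is a
// stripped-down re-implementation (not part of the patch) of the same
// pattern, an unbounded FIFO guarded by a sync.Cond, to illustrate how
// write, eof and Read cooperate. It uses a slice where the original uses a
// linked list.
package main

import (
	"fmt"
	"io"
	"sync"
)

type fifo struct {
	mu     sync.Mutex
	cond   *sync.Cond
	chunks [][]byte
	closed bool
}

func newFIFO() *fifo {
	f := &fifo{}
	f.cond = sync.NewCond(&f.mu)
	return f
}

func (f *fifo) write(p []byte) {
	f.mu.Lock()
	f.chunks = append(f.chunks, p)
	f.cond.Signal() // wake a blocked reader
	f.mu.Unlock()
}

func (f *fifo) eof() {
	f.mu.Lock()
	f.closed = true
	f.cond.Signal()
	f.mu.Unlock()
}

func (f *fifo) read() ([]byte, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for len(f.chunks) == 0 && !f.closed {
		f.cond.Wait() // out of data, wait for the producer
	}
	if len(f.chunks) == 0 {
		return nil, io.EOF // closed and fully drained
	}
	p := f.chunks[0]
	f.chunks = f.chunks[1:]
	return p, nil
}

func main() {
	f := newFIFO()
	go func() { f.write([]byte("hello")); f.eof() }()
	for {
		p, err := f.read()
		if err == io.EOF {
			return
		}
		fmt.Printf("%s\n", p)
	}
}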
-type Signature struct { - Format string - Blob []byte -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. 
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - return &openSSHCertSigner{cert, signer}, nil -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. 
This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
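// Editor's note: a client-side sketch (not part of the patch) wiring the
// CertChecker removed above into an SSH connection, so host certificates
// signed by a known CA are accepted. The CA key file, credentials and
// address are hypothetical.
package main

import (
	"bytes"
	"io/ioutil"

	"golang.org/x/crypto/ssh"
)

func main() {
	caBytes, _ := ioutil.ReadFile("ca.pub") // hypothetical authorized CA key
	caKey, _, _, _, err := ssh.ParseAuthorizedKey(caBytes)
	if err != nil {
		panic(err)
	}

	checker := &ssh.CertChecker{
		// Only IsHostAuthority must be set for host-certificate checking.
		IsHostAuthority: func(auth ssh.PublicKey, address string) bool {
			return bytes.Equal(auth.Marshal(), caKey.Marshal())
		},
	}

	config := &ssh.ClientConfig{
		User:            "git",
		Auth:            []ssh.AuthMethod{ssh.Password("password")}, // hypothetical
		HostKeyCallback: checker.CheckHostKey,
	}

	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		panic(err)
	}
	defer client.Close()
}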
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial) - } - - for opt, _ := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
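// Editor's note: a sketch (not part of the patch) of minting a user
// certificate with the SignCert method removed above. Key generation and
// principal names are hypothetical.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"time"

	"golang.org/x/crypto/ssh"
)

func main() {
	caPriv, _ := rsa.GenerateKey(rand.Reader, 2048)
	authority, err := ssh.NewSignerFromKey(caPriv)
	if err != nil {
		panic(err)
	}

	userPriv, _ := rsa.GenerateKey(rand.Reader, 2048)
	userPub, _ := ssh.NewPublicKey(&userPriv.PublicKey)

	cert := &ssh.Certificate{
		Key:             userPub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "alice@example.com",
		ValidPrincipals: []string{"alice"},
		ValidAfter:      uint64(time.Now().Unix()),
		ValidBefore:     uint64(time.Now().Add(24 * time.Hour).Unix()),
	}
	// SignCert fills Nonce, SignatureKey and Signature.
	if err := cert.SignCert(rand.Reader, authority); err != nil {
		panic(err)
	}
	_ = cert.Marshal() // OpenSSH wire format, checked later by CheckCert
}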
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index 195530e..0000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. 
- ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). 
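// Editor's note: a server-side sketch (not part of the patch) of the
// NewChannel contract described above: every incoming channel must be
// Accepted or Rejected, and the returned request channel must be serviced.
package main

import "golang.org/x/crypto/ssh"

func handleChannels(chans <-chan ssh.NewChannel) {
	for newCh := range chans {
		if newCh.ChannelType() != "session" {
			newCh.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		ch, requests, err := newCh.Accept()
		if err != nil {
			continue
		}
		// The request channel must be drained or the Channel will hang.
		go ssh.DiscardRequests(requests)
		go func() {
			defer ch.Close()
			ch.Write([]byte("hello\n"))
		}()
	}
}

func main() {} // handleChannels would be fed from an ssh.NewServerConn loop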
- decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (c *channel) writePacket(packet []byte) error { - c.writeMu.Lock() - if c.sentClose { - c.writeMu.Unlock() - return io.EOF - } - c.sentClose = (packet[0] == msgChannelClose) - err := c.mux.conn.writePacket(packet) - c.writeMu.Unlock() - return err -} - -func (c *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", c.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], c.remoteId) - return c.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if c.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - c.writeMu.Lock() - packet := c.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. 
- c.writeMu.Unlock() - - for len(data) > 0 { - space := min(c.maxRemotePayload, len(data)) - if space, err = c.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], c.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = c.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - c.writeMu.Lock() - c.packetPool[extendedCode] = packet - c.writeMu.Unlock() - - return n, err -} - -func (c *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > c.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - c.windowMu.Lock() - if c.myWindow < length { - c.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - c.myWindow -= length - c.windowMu.Unlock() - - if extended == 1 { - c.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - c.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. 
-func (c *channel) responseMessageReceived() error { - if c.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if c.decided { - return errors.New("ssh: duplicate response received for channel") - } - c.decided = true - return nil -} - -func (c *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return c.handleData(packet) - case msgChannelClose: - c.sendMessage(channelCloseMsg{PeersId: c.remoteId}) - c.mux.chanList.remove(c.localId) - c.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - c.extPending.eof() - c.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := c.responseMessageReceived(); err != nil { - return err - } - c.mux.chanList.remove(msg.PeersId) - c.msg <- msg - case *channelOpenConfirmMsg: - if err := c.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - c.remoteId = msg.MyId - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.MyWindow) - c.msg <- msg - case *windowAdjustMsg: - if !c.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: c, - } - - c.incomingRequests <- &req - default: - c.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (c *channel) Accept() (Channel, <-chan *Request, error) { - if c.decided { - return nil, nil, errDecidedAlready - } - c.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersId: c.remoteId, - MyId: c.localId, - MyWindow: c.myWindow, - MaxPacketSize: c.maxIncomingPayload, - } - c.decided = true - if err := c.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return c, c.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersId: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, 
errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersId: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersId: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersId: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersId: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersId: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 13484ab..0000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
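// Editor's note: a sketch (not part of the patch) of the SendRequest flow
// shown above, issuing an "exec" request on an open session channel. The
// channel is assumed to come from an established connection.
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// execOnChannel sends an out-of-band "exec" request and, because
// wantReply is true, blocks until the peer's success/failure reply.
func execOnChannel(ch ssh.Channel, command string) error {
	payload := ssh.Marshal(&struct{ Command string }{command})
	ok, err := ch.SendRequest("exec", true, payload)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("exec request for %q was denied", command)
	}
	return nil
}

func main() {}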
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type streamCipherMode struct { - keySize int - ivSize int - skip int - createFunc func(key, iv []byte) (cipher.Stream, error) -} - -func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) { - if len(key) < c.keySize { - panic("ssh: key length too small for cipher") - } - if len(iv) < c.ivSize { - panic("ssh: iv too small for cipher") - } - - stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize]) - if err != nil { - return nil, err - } - - var streamDump []byte - if c.skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := c.skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - return stream, nil -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*streamCipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR}, - "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR}, - "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, 1536, newRC4}, - "arcfour256": {32, 0, 1536, newRC4}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, 0, newRC4}, - - // AES-GCM is not a stream cipher, so it is constructed with a - // special case. If we add any more non-stream ciphers, we - // should invest a cleaner way to do this. - gcmCipherID: {16, 12, 0, nil}, - - // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, 0, nil}, - - // 3des-cbc is insecure and is disabled by default. - tripledescbcID: {24, des.BlockSize, 0, nil}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readPacket reads and decrypt a single packet from the reader argument. 
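// Editor's note: a sketch (not part of the patch) of how the cipherModes
// table above surfaces in the public API: modes outside the default set
// (e.g. the CBC and 3des-cbc ciphers) are only negotiated when requested
// explicitly via Config.Ciphers.
package main

import "golang.org/x/crypto/ssh"

func clientConfig() *ssh.ClientConfig {
	cfg := &ssh.ClientConfig{
		User:            "demo",
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // fine for a sketch, not for production
	}
	// Restrict (or extend) the negotiated ciphers; names must match keys
	// of the cipherModes map, e.g. "aes128-ctr" or "aes256-ctr".
	cfg.Config.Ciphers = []string{"aes256-ctr", "aes128-ctr"}
	return cfg
}

func main() { _ = clientConfig() }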
-func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writePacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil 
{ - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded.") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 || padding >= 20 { - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. 
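// (When a cbcError is returned, readPacket below drains exactly this many
// further bytes from the connection, so a failed MAC check and a failed
// length check are indistinguishable to a network observer.)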
- oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. 
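// A worked example of the positions computed above (sizes are hypothetical):
// with length=28, paddingLength=10 and macSize=20, macStart=32,
// paddingStart=22 and entirePacketSize=52; the payload finally returned is
// packetData[5:22], i.e. length-paddingLength-1 = 17 bytes.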
- if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { - return nil, err - } else { - c.oracleCamouflage -= uint32(n) - } - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index a7e3263..0000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. 
If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com")) - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. -func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. 
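// (Replying false is sufficient for OpenSSH's keepalive@openssh.com probes:
// the sender only checks that some reply arrives, not that the request
// succeeded.)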
- r.Reply(false, nil) - } -} - -// handleChannelOpens handles channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from the PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to be - // established. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code.
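A usage sketch for ClientConfig (not part of this file; authorizedKeyBytes, the credentials and the address are hypothetical, and the usual "golang.org/x/crypto/ssh", "log" and "time" imports are assumed): pinning the server's host key with FixedHostKey looks like this:

	hostKey, _, _, _, err := ssh.ParseAuthorizedKey(authorizedKeyBytes) // e.g. the key part of a known_hosts line
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User:            "deploy",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.FixedHostKey(hostKey),
		Timeout:         10 * time.Second,
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)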
-func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index b882da0..0000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - tried := make(map[string]bool) - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok { - // success - return nil - } - tried[auth.method()] = true - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if tried[candidateMethod] { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) -} - -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) - } - return s -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. 
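A sketch of how the negotiation loop above plays out in practice (signer is a hypothetical ssh.Signer): the client always tries "none" first, and the order of Auth only matters among the methods the server actually offers:

	config.Auth = []ssh.AuthMethod{
		ssh.PublicKeys(signer),   // attempted while the server offers "publickey"
		ssh.Password("fallback"), // attempted when "password" remains on offer
	}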
-type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return false, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. 
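// (The enquiry is an SSH_MSG_USERAUTH_REQUEST with HasSig=false; see
// validateKey below. Only a key the server acknowledges is then used for a
// signed, full authentication request.)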
- - signers, err := cb() - if err != nil { - return false, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return false, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return false, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return false, nil, err - } - var success bool - success, methods, err = handleAuthResponse(c) - if err != nil { - return false, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return false, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - // TODO(gpaul): add callback to present the banner to the user - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. 
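A usage sketch for the public-key path (the key path is hypothetical; assumes the "io/ioutil", "log" and "golang.org/x/crypto/ssh" imports):

	pem, err := ioutil.ReadFile("/home/user/.ssh/id_ed25519")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pem)
	if err != nil {
		log.Fatal(err)
	}
	config.Auth = []ssh.AuthMethod{ssh.PublicKeys(signer)}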
-func handleAuthResponse(c packetConn) (bool, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return false, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - // TODO: add callback to present the banner to the user - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - return false, msg.Methods, nil - case msgUserAuthSuccess: - return true, nil, nil - default: - return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return false, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return false, nil, err - } - - // like handleAuthResponse, but with fewer options. - switch packet[0] { - case msgUserAuthBanner: - // TODO: Print banners during userauth. - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - return false, msg.Methods, nil - case msgUserAuthSuccess: - return true, nil, nil - default: - return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - - // Manually unpack the prompt/echo pairs.
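// (Each pair on the wire is an SSH string — a uint32 length followed by that
// many bytes — plus a single echo byte, per RFC 4256 section 3.2; that is
// exactly what the loop below parses.)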
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return false, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return false, nil, err - } - - if len(answers) != len(prompts) { - return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return false, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok || err != nil { // either success or error terminates - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, it will retry indefinitely. - -// This is useful for interactive clients using challenge/response-type -// authentication (e.g. Keyboard-Interactive, Password, etc.) where the user -// could mistype their response resulting in the server issuing an -// SSH_MSG_USERAUTH_FAILURE (RFC 4252 section 8 [password] and RFC 4256 section 3.4 -// [keyboard-interactive]). Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index dc39e4d..0000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers specifies the supported ciphers in preference order. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - "arcfour256", "arcfour128", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order.
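A usage sketch combining the two keyboard-interactive helpers above (reads answers from stdin; assumes "bufio", "fmt", "os" and "strings" imports, and ignores the echo flags, which a real client would honour):

	challenge := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		r := bufio.NewReader(os.Stdin)
		answers := make([]string, len(questions))
		for i, q := range questions {
			fmt.Print(q)
			line, err := r.ReadString('\n')
			if err != nil {
				return nil, err
			}
			answers[i] = strings.TrimSuffix(line, "\n")
		}
		return answers, nil
	}
	config.Auth = []ssh.AuthMethod{
		ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3),
	}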
-var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. -func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns the rekeying interval in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC 4344, block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with the RFC 4253 recommendation to rekey after 1 GB of data.
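// (For the AES cases above that works out to 16 bytes/block * 2^(128/4)
// blocks = 2^36 bytes, i.e. 64 GiB; the 1 << 30 below is 1 GiB.)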
- return 1 << 30 -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - - result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - - result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. -const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = supportedCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = supportedKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. 
- c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionId, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b068..0000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
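A sketch of overriding the defaults that SetDefaults would otherwise fill in (algorithm names are taken from the lists above; config is a hypothetical *ssh.ClientConfig):

	config.Config = ssh.Config{
		KeyExchanges:   []string{"curve25519-sha256@libssh.org"},
		Ciphers:        []string{"aes128-gcm@openssh.com", "aes256-ctr"},
		MACs:           []string{"hmac-sha2-256-etm@openssh.com"},
		RekeyThreshold: 1 << 30, // rekey after 1 GiB instead of the cipher-specific default
	}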
- -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC 4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open a channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection. - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshConn provides net.Conn metadata, but disallows direct reads and -// writes.
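A sketch of driving the Conn interface directly (the channel type and the conn variable are hypothetical; most callers use Client.NewSession instead):

	ch, reqs, err := conn.OpenChannel("example-channel@example.com", nil)
	if err != nil {
		if openErr, ok := err.(*ssh.OpenChannelError); ok {
			log.Printf("rejected: %v", openErr.Reason)
		}
		return err
	}
	go ssh.DiscardRequests(reqs)
	defer ch.Close()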
-type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322..0000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 932ce83..0000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. 
- hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
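A client-side sketch of steering the hostKeyAlgorithms selection above (the KeyAlgo* constants are defined in this package):

	config.HostKeyAlgorithms = []string{ssh.KeyAlgoED25519, ssh.KeyAlgoRSA}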
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
- return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - if len(t.hostKeys) == 0 { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
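// The kexInitMsg built in sendKexInit above is populated straight from
// ssh.Config, so callers control both the negotiated algorithm lists and
// the rekey cadence that the byte/packet counters account for. A minimal
// client-side sketch (the address and credentials are hypothetical):
//
//	// import "golang.org/x/crypto/ssh"
//	cfg := &ssh.ClientConfig{
//		User:            "git",
//		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // hypothetical
//		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // tests only
//		Config: ssh.Config{
//			KeyExchanges:   []string{"curve25519-sha256@libssh.org"},
//			Ciphers:        []string{"aes128-gcm@openssh.com"},
//			MACs:           []string{"hmac-sha2-256-etm@openssh.com"},
//			RekeyThreshold: 1 << 20, // rekey after ~1 MiB instead of the default
//		},
//	}
//	client, err := ssh.Dial("tcp", "example.com:22", cfg)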
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index f91c277..0000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. 
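// The t.hostKeyCallback call in client() above is where the client's trust
// decision lands; it is supplied through ClientConfig.HostKeyCallback.
// Pinning a single known key is a short sketch (keyBytes, an
// authorized_keys-format line, is hypothetical):
//
//	// import "golang.org/x/crypto/ssh"
//	func pinHostKey(keyBytes []byte) (ssh.HostKeyCallback, error) {
//		pub, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes)
//		if err != nil {
//			return nil, err
//		}
//		return ssh.FixedHostKey(pub), nil
//	}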
-type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - kInt, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - kInt, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
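// The dhGroup methods above and below are textbook finite-field
// Diffie-Hellman: each side publishes g^x mod p and exponentiates the
// peer's value with its own secret. A toy illustration with math/big
// (deliberately tiny, insecure parameters, for exposition only):
//
//	// import ("crypto/rand"; "math/big")
//	p := big.NewInt(23) // toy modulus; the real groups are 1024+ bits
//	g := big.NewInt(5)
//	pMinus1 := new(big.Int).Sub(p, big.NewInt(1))
//	x, _ := rand.Int(rand.Reader, pMinus1) // one side's secret
//	y, _ := rand.Int(rand.Reader, pMinus1) // other side's secret
//	X := new(big.Int).Exp(g, x, p)         // exchanged in kexDHInit
//	Y := new(big.Int).Exp(g, y, p)         // exchanged in kexDHReply
//	k1 := new(big.Int).Exp(Y, x, p)
//	k2 := new(big.Int).Exp(X, y, p)
//	// k1.Cmp(k2) == 0: both ends hold the same shared secret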
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, nil -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
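// The ECDH variant reduces to one ScalarMult per side over the agreed
// NIST curve, exactly as in Client above and Server below. A standalone
// sketch of the shared-secret step on P-256:
//
//	// import ("crypto/ecdsa"; "crypto/elliptic"; "crypto/rand")
//	a, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) // ephemeral
//	b, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//	sa, _ := elliptic.P256().ScalarMult(b.PublicKey.X, b.PublicKey.Y, a.D.Bytes())
//	sb, _ := elliptic.P256().ScalarMult(a.PublicKey.X, a.PublicKey.Y, b.D.Bytes())
//	// sa.Cmp(sb) == 0: the shared x-coordinate is identical on both sides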
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
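// curve25519 agreement has the same shape: ScalarBaseMult derives the
// public value (see generate above) and ScalarMult the shared secret
// (see Client/Server below). A self-contained sketch using the same
// x/crypto/curve25519 calls:
//
//	// import ("crypto/rand"; "io"; "golang.org/x/crypto/curve25519")
//	var aPriv, aPub, bPriv, bPub, s1, s2 [32]byte
//	io.ReadFull(rand.Reader, aPriv[:])
//	io.ReadFull(rand.Reader, bPriv[:])
//	curve25519.ScalarBaseMult(&aPub, &aPriv)
//	curve25519.ScalarBaseMult(&bPub, &bPriv)
//	curve25519.ScalarMult(&s1, &aPriv, &bPub)
//	curve25519.ScalarMult(&s2, &bPriv, &aPub)
//	// s1 == s2; an all-zero result must be rejected, as the code below does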
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - kInt := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - kInt := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 7a8756a..0000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1006 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. 
- // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. 
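// ParseKnownHosts above consumes one entry per call and hands back the
// unparsed remainder, so iterating a whole file is a short loop. A sketch
// (the file path is hypothetical):
//
//	// import ("fmt"; "io"; "io/ioutil"; "log"; "golang.org/x/crypto/ssh")
//	data, err := ioutil.ReadFile(".ssh/known_hosts") // hypothetical path
//	if err != nil {
//		log.Fatal(err)
//	}
//	for {
//		marker, hosts, pub, comment, rest, err := ssh.ParseKnownHosts(data)
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(marker, hosts, pub.Type(), comment)
//		data = rest
//	}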
-type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != r.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (r *dsaPublicKey) Type() string { - return "ssh-dss" -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. 
- w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. - if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (key *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + key.nistID() -} - -func (key *ecdsaPublicKey) nistID() string { - switch key.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (key ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := ed25519.PublicKey(w.KeyBytes) - - return (ed25519PublicKey)(key), w.Rest, nil -} - -func (key ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(key), - } - return Marshal(&w) -} - -func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != key.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) - } - - edKey := (ed25519.PublicKey)(key) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// 
parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (key *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - key.Type(), - key.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != key.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) - } - - h := ecHash(key.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding -// Signer instance. ECDSA keys must use P-256, P-384 or P-521. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return &dsaPrivateKey{key}, nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
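// Because the wrapper below accepts any crypto.Signer, the same path serves
// HSM- or agent-backed keys. A sketch with an in-memory ECDSA key standing
// in for a hardware signer:
//
//	// import ("crypto/ecdsa"; "crypto/elliptic"; "crypto/rand"; "log"; "golang.org/x/crypto/ssh")
//	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//	if err != nil {
//		log.Fatal(err)
//	}
//	signer, err := ssh.NewSignerFromSigner(priv) // *ecdsa.PrivateKey is a crypto.Signer
//	if err != nil {
//		log.Fatal(err)
//	}
//	sig, err := signer.Sign(rand.Reader, []byte("payload"))
//	// on success: signer.PublicKey().Verify([]byte("payload"), sig) == nil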
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - var hashFunc crypto.Hash - - switch key := s.pubKey.(type) { - case *rsaPublicKey, *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: s.pubKey.Type(), - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
-func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If wrong passphrase, return -// x509.IncorrectPasswordError. -func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - buf := block.Bytes - - if encryptedBlock(block) { - if x509.IsEncryptedPEMBlock(block) { - var err error - buf, err = x509.DecryptPEMBlock(block, passPhrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - } - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. 
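// ParsePrivateKey / ParsePrivateKeyWithPassphrase above are the usual route
// from a PEM file to an AuthMethod. A sketch (the path and passphrase are
// hypothetical):
//
//	// import ("io/ioutil"; "log"; "golang.org/x/crypto/ssh")
//	pemBytes, err := ioutil.ReadFile("id_rsa") // hypothetical path
//	if err != nil {
//		log.Fatal(err)
//	}
//	signer, err := ssh.ParsePrivateKey(pemBytes)
//	if err != nil {
//		// fall back to a passphrase if the block was encrypted
//		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("hunter2"))
//	}
//	if err != nil {
//		log.Fatal(err)
//	}
//	auth := ssh.PublicKeys(signer)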
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { - magic := append([]byte("openssh-key-v1"), 0) - if !bytes.Equal(magic, key[0:len(magic)]) { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") - } - - // we only handle ed25519 and rsa keys currently - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a062..0000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index e6ecd3a..0000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 - - // Standard authentication messages - msgUserAuthSuccess = 52 - msgUserAuthBanner = 53 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. 
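// FingerprintSHA256 above prints the same "SHA256:..." form as
// `ssh-keygen -lf`, which makes it convenient for logging inside a
// HostKeyCallback:
//
//	// import ("fmt"; "net"; "golang.org/x/crypto/ssh")
//	var cb ssh.HostKeyCallback = func(hostname string, remote net.Addr, key ssh.PublicKey) error {
//		fmt.Printf("%s %s %s\n", hostname, key.Type(), ssh.FingerprintSHA256(key))
//		return nil // a real callback must reject unknown keys
//	}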
-const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersId uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersId uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersId uint32 `sshtype:"91"` - MyId uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersId uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersId uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersId uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersId uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersId uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersId uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersId uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
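// Marshal (below) and Unmarshal (above) are exported, so the tag-driven
// wire coding they document is usable for custom messages too. A round-trip
// sketch (pingMsg and its type byte 192 are made up for illustration):
//
//	// import ("log"; "golang.org/x/crypto/ssh")
//	type pingMsg struct {
//		Seq  uint32 `sshtype:"192"` // hypothetical message number
//		Data []byte `ssh:"rest"`
//	}
//	pkt := ssh.Marshal(&pingMsg{Seq: 7, Data: []byte("hi")}) // pkt[0] == 192
//	var got pingMsg
//	if err := ssh.Unmarshal(pkt, &got); err != nil {
//		log.Fatal(err)
//	}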
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
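The big.Int helpers above implement the RFC 4251 "mpint" encoding: a uint32 length prefix followed by a minimal big-endian two's-complement body. They are unexported, but the rules are compact enough to restate as a stand-alone sketch, useful for testing against; this mirrors the logic of marshalInt and intLength and is not part of the package API:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// encodeMpint re-implements the mpint rules used by marshalInt above:
// zero is the empty string, a positive number with its high bit set
// gets a 0x00 pad, and a negative number is the inverted bytes of
// (-n - 1), padded with 0xff when needed to stay negative.
func encodeMpint(n *big.Int) []byte {
	var body []byte
	switch n.Sign() {
	case 1:
		body = n.Bytes()
		if body[0]&0x80 != 0 {
			body = append([]byte{0x00}, body...)
		}
	case -1:
		m := new(big.Int).Neg(n)
		m.Sub(m, big.NewInt(1))
		body = m.Bytes()
		for i := range body {
			body[i] ^= 0xff
		}
		if len(body) == 0 || body[0]&0x80 == 0 {
			body = append([]byte{0xff}, body...)
		}
	}
	out := make([]byte, 4+len(body))
	binary.BigEndian.PutUint32(out, uint32(len(body)))
	copy(out[4:], body)
	return out
}

func main() {
	fmt.Printf("% x\n", encodeMpint(big.NewInt(0x80))) // 00 00 00 02 00 80
	fmt.Printf("% x\n", encodeMpint(big.NewInt(-1)))   // 00 00 00 01 ff
}
```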
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 27a527c..0000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. 
-func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. 
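The mux's SendRequest is surfaced on *ssh.Client, so global requests are available to callers of the public API. A hedged sketch of the common keepalive idiom, assuming an already established client (the helper name and interval handling are mine; "keepalive@openssh.com" is the OpenSSH convention, which servers answer with a failure reply, and any reply at all proves the connection is live):

```go
package sshutil

import (
	"time"

	"golang.org/x/crypto/ssh"
)

// keepAlive pings the peer with a global request until done is closed.
func keepAlive(client *ssh.Client, interval time.Duration, done <-chan struct{}) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if _, _, err := client.SendRequest("keepalive@openssh.com", true, nil); err != nil {
				return err // mux.loop has torn the connection down
			}
		case <-done:
			return nil
		}
	}
}
```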
-func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. -func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersId: msg.PeersId, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersId - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersId: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index b6f4cc8..0000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -// ServerConfig holds server-specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts is unlimited. If set to zero, the number of attempts is limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown.
- KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. 
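Before the handshake code itself, a hedged end-to-end sketch of standing up a server with this API: load a host key, install a password callback, accept one TCP connection, and service the channel and request streams as NewServerConn requires. The key path, credentials, port, and rejection policy are all placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := ioutil.ReadFile("/etc/ssh/ssh_host_rsa_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" { // placeholder check
				return nil, nil
			}
			return nil, fmt.Errorf("unknown user %q", c.User())
		},
	}
	config.AddHostKey(signer) // at least one host key is mandatory

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	// NewServerConn runs the handshake; the channel and request
	// streams must be serviced or the connection will hang.
	conn, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.UnknownChannelType, "demo server accepts no channels")
	}
}
```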
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. - s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -// ServerAuthError implements the error interface. It appends any authentication -// errors that may occur, and is returned if all of the authentication methods -// provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods.
- Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - perms = nil - authErr := errors.New("no auth passed yet") - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay.
- - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
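A hedged sketch of wiring up this callback from the server side: the library hands the callback a challenge function that drives the message exchange implemented below. The helper name, prompt text, and expected answer are placeholders of mine:

```go
package sshutil

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

// demoKeyboardInteractive returns a ServerConfig whose callback asks a
// single hidden question via the library-provided challenge function.
func demoKeyboardInteractive() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
			answers, err := challenge(conn.User(), "demo login",
				[]string{"One-time code: "},
				[]bool{false}) // false: client should not echo the answer
			if err != nil {
				return nil, err
			}
			if len(answers) == 1 && answers[0] == "424242" {
				return nil, nil // success; no extra permissions
			}
			return nil, errors.New("wrong code")
		},
	}
}
```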
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index 17e2aa8..0000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. 
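Before the Session internals, a hedged client-side sketch of the usual flow: dial, open a session, run one command. Host, user, and password are placeholders, and InsecureIgnoreHostKey is only acceptable for throwaway experiments:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not do this in production
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Output wires Stdout to a buffer, then runs the command and waits.
	out, err := session.Output("uname -a")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```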
-type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. 
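A hedged sketch tying together the request helpers above: request a pty using a few of the mode constants from the table, then start a login shell. The function name, terminal type, size, and speed values are arbitrary examples, assuming a fresh *ssh.Session:

```go
package sshutil

import "golang.org/x/crypto/ssh"

// interactiveShell requests a pty, starts a shell, and waits for exit.
func interactiveShell(session *ssh.Session) error {
	modes := ssh.TerminalModes{
		ssh.ECHO:          1,     // enable echoing on the remote pty
		ssh.TTY_OP_ISPEED: 14400, // input baud rate
		ssh.TTY_OP_OSPEED: 14400, // output baud rate
	}
	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
		return err
	}
	if err := session.Shell(); err != nil {
		return err
	}
	return session.Wait() // blocks until the remote shell exits
}
```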
-func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for _ = range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. 
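A hedged sketch of the pipe helpers defined next: both pipes must be obtained before Start, and closing stdin is what lets the remote command see EOF and exit. The helper name and the remote command are placeholders:

```go
package sshutil

import (
	"io"
	"io/ioutil"

	"golang.org/x/crypto/ssh"
)

// runFilter feeds input to a remote command and returns its output.
func runFilter(session *ssh.Session, input string) (string, error) {
	stdin, err := session.StdinPipe()
	if err != nil {
		return "", err
	}
	stdout, err := session.StdoutPipe()
	if err != nil {
		return "", err
	}
	if err := session.Start("tr a-z A-Z"); err != nil {
		return "", err
	}
	if _, err := io.WriteString(stdin, input); err != nil {
		return "", err
	}
	stdin.Close() // remote sees EOF and can finish

	out, err := ioutil.ReadAll(stdout)
	if err != nil {
		return "", err
	}
	return string(out), session.Wait()
}
```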
-func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index a2dccc6..0000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,115 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. 
connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index acf1717..0000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". 
-func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. 
The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. 
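A hedged sketch of remote ("reverse") port forwarding built on this machinery, assuming an established *ssh.Client: Listen issues the tcpip-forward request, forwarded connections surface through Accept, and Close sends cancel-tcpip-forward. The address and response are placeholders:

```go
package sshutil

import (
	"io"
	"net"

	"golang.org/x/crypto/ssh"
)

// serveRemote listens on the server side and answers each forwarded
// connection with a fixed greeting.
func serveRemote(client *ssh.Client) error {
	ln, err := client.Listen("tcp", "0.0.0.0:8080")
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			defer c.Close()
			io.WriteString(c, "hello from the forwarding client\n")
		}(conn)
	}
}
```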
-func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. 
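Listen, ListenTCP, and Dial above are the public face of the RFC 4254 forwarding support being removed here. A minimal consumption sketch, assuming an already-dialed *ssh.Client named client; the addresses and the HTTP handler are illustrative only:

    package tunnel

    import (
        "log"
        "net/http"

        "golang.org/x/crypto/ssh"
    )

    // serveOverTunnel asks the remote sshd to listen on its 127.0.0.1:8080
    // (a tcpip-forward request) and serves HTTP on the connections that come
    // back as forwarded-tcpip channels.
    func serveOverTunnel(client *ssh.Client) error {
        ln, err := client.Listen("tcp", "127.0.0.1:8080")
        if err != nil {
            return err
        }
        defer ln.Close() // sends cancel-tcpip-forward
        return http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            log.Printf("tunneled connection for %s", r.RemoteAddr)
            w.Write([]byte("hello through the reverse tunnel\n"))
        }))
    }

    // dialViaRemote opens a direct-tcpip channel: the TCP connection to the
    // target is made from the remote host's side. The returned net.Conn does
    // not support deadlines (see chanConn below).
    func dialViaRemote(client *ssh.Client) error {
        conn, err := client.Dial("tcp", "10.0.0.5:5432")
        if err != nil {
            return err
        }
        defer conn.Close()
        _, err = conn.Write([]byte("ping"))
        return err
    }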
-func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index f9780e0..0000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. 
-type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writePacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil { - return err - } else { - t.reader.pendingKeyChange <- ciph - } - - if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil { - return err - } else { - t.writer.pendingKeyChange <- ciph - } - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message.") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// generateKeys generates key material for IV, MAC and encryption. -func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv = make([]byte, cipherMode.ivSize) - key = make([]byte, cipherMode.keySize) - macKey = make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - return -} - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - iv, key, macKey := generateKeys(d, algs, kex) - - if algs.Cipher == gcmCipherID { - return newGCMCipher(iv, key, macKey) - } - - if algs.Cipher == aes128cbcID { - return newAESCBCCipher(iv, key, macKey, algs) - } - - if algs.Cipher == tripledescbcID { - return newTripleDESCBCCipher(iv, key, macKey, algs) - } - - c := &streamPacketCipher{ - mac: macModes[algs.MAC].new(macKey), - etm: macModes[algs.MAC].etm, - } - c.macResult = make([]byte, c.mac.Size()) - - var err error - c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) - if err != nil { - return nil, err - } - - return c, nil -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. 
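In RFC 4253 terms the expansion implemented below is K1 = HASH(K || H || tag || session_id) and Kn = HASH(K || H || K1 || ... || K(n-1)), truncated to the required length. A standalone sketch of the same scheme, with SHA-256 standing in for the negotiated hash and K assumed to be already mpint-encoded, as kexResult stores it:

    package kdf

    import "crypto/sha256"

    // expand derives n bytes of key material per RFC 4253, section 7.2.
    func expand(k, h, sessionID []byte, tag byte, n int) []byte {
        var out, soFar []byte
        for len(out) < n {
            d := sha256.New()
            d.Write(k)
            d.Write(h)
            if len(soFar) == 0 {
                d.Write([]byte{tag}) // 'A'..'F' selects IV/key/MAC key per direction
                d.Write(sessionID)
            } else {
                d.Write(soFar) // all digests produced so far, concatenated
            }
            digest := d.Sum(nil)
            out = append(out, digest...)
            soFar = append(soFar, digest...)
        }
        return out[:n]
    }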
-func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. - if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for len(versionString) < maxVersionStringBytes { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index d3681ab..0000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. 
The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b..0000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1d..0000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592..0000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. 
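The go1.9 type aliases above are what let code written against this package interoperate with the standard library without conversion. A compile-time sketch (import names shortened for illustration; builds on go1.9 and later):

    package main

    import (
        stdctx "context"
        "fmt"

        netctx "golang.org/x/net/context"
    )

    // takesNet is declared against the x/net type.
    func takesNet(ctx netctx.Context) { fmt.Println(ctx.Err()) }

    func main() {
        // Legal because netctx.Context is an alias, not a distinct type.
        takesNet(stdctx.Background())
    }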
-var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. 
- child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
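Since the exported surface mirrors the standard library, the pre-go1.7 implementation above is exercised exactly like its modern counterpart. A runnable sketch of the deadline path (the 50ms budget and 1s workload are illustrative):

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel() // release the timer even if the deadline never fires

        select {
        case <-time.After(time.Second): // stand-in for real work
            fmt.Println("finished")
        case <-ctx.Done():
            // The deadline fired first: Err reports DeadlineExceeded.
            // Had cancel run first, it would report Canceled instead.
            fmt.Println(ctx.Err() == context.DeadlineExceeded) // true
        }
    }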
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80..0000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac..0000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. 
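A short sketch of the round-trip the package comment promises, with the expected output noted inline:

    package main

    import (
        "fmt"

        "golang.org/x/net/html/atom"
    )

    func main() {
        a := atom.Lookup([]byte("div"))
        fmt.Println(a == atom.Div, a.String())       // true div
        fmt.Println(atom.Lookup([]byte("DIV")) == 0) // true: lookup is case sensitive
    }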
-func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2605ba3..0000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,713 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x2106 - AcceptCharset Atom = 0x210e - Accesskey Atom = 0x3309 - Action Atom = 0x1f606 - Address Atom = 0x4f307 - Align Atom = 0x1105 - Alt Atom = 0x4503 - Annotation Atom = 0x1670a - AnnotationXml Atom = 0x1670e - Applet Atom = 0x2b306 - Area Atom = 0x2fa04 - Article Atom = 0x38807 - Aside Atom = 0x8305 - Async Atom = 0x7b05 - Audio Atom = 0xa605 - Autocomplete Atom = 0x1fc0c - Autofocus Atom = 0xb309 - Autoplay Atom = 0xce08 - B Atom = 0x101 - Base Atom = 0xd604 - Basefont Atom = 0xd608 - Bdi Atom = 0x1a03 - Bdo Atom = 0xe703 - Bgsound Atom = 0x11807 - Big Atom = 0x12403 - Blink Atom = 0x12705 - Blockquote Atom = 0x12c0a - Body Atom = 0x2f04 - Br Atom = 0x202 - Button Atom = 0x13606 - Canvas Atom = 0x7f06 - Caption Atom = 0x1bb07 - Center Atom = 0x5b506 - Challenge Atom = 0x21f09 - Charset Atom = 0x2807 - Checked Atom = 0x32807 - Cite Atom = 0x3c804 - Class Atom = 0x4de05 - Code Atom = 0x14904 - Col Atom = 0x15003 - Colgroup Atom = 0x15008 - Color Atom = 0x15d05 - Cols Atom = 0x16204 - Colspan Atom = 0x16207 - Command Atom = 0x17507 - Content Atom = 0x42307 - Contenteditable Atom = 0x4230f - Contextmenu Atom = 0x3310b - Controls Atom = 0x18808 - Coords Atom = 0x19406 - Crossorigin Atom = 0x19f0b - Data Atom = 0x44a04 - Datalist Atom = 0x44a08 - Datetime Atom = 0x23c08 - Dd Atom = 0x26702 - Default Atom = 0x8607 - Defer Atom = 0x14b05 - Del Atom = 0x3ef03 - Desc Atom = 0x4db04 - Details Atom = 0x4807 - Dfn Atom = 0x6103 - Dialog Atom = 0x1b06 - Dir Atom = 0x6903 - Dirname Atom = 0x6907 - Disabled Atom = 0x10c08 - Div Atom = 0x11303 - Dl Atom = 0x11e02 - Download Atom = 0x40008 - Draggable Atom = 0x17b09 - Dropzone Atom = 0x39108 - Dt Atom = 0x50902 - Em Atom = 0x6502 - Embed Atom = 0x6505 - Enctype Atom = 0x21107 - Face Atom = 0x5b304 - Fieldset Atom = 0x1b008 - Figcaption Atom = 0x1b80a - Figure Atom = 0x1cc06 - Font Atom = 0xda04 - Footer Atom = 0x8d06 - For Atom = 0x1d803 - ForeignObject Atom = 0x1d80d - Foreignobject Atom = 0x1e50d - Form Atom = 0x1f204 - Formaction Atom = 0x1f20a - Formenctype Atom = 0x20d0b - Formmethod Atom = 0x2280a - Formnovalidate Atom = 0x2320e - Formtarget Atom = 0x2470a - Frame Atom = 0x9a05 - Frameset Atom = 0x9a08 - H1 Atom = 0x26e02 - H2 Atom = 0x29402 - H3 Atom = 0x2a702 - H4 Atom = 0x2e902 - H5 Atom = 0x2f302 - H6 Atom = 0x50b02 - Head Atom = 0x2d504 - Header Atom = 0x2d506 - Headers Atom = 0x2d507 - Height Atom = 0x25106 - Hgroup Atom = 0x25906 - Hidden Atom = 0x26506 - High Atom = 0x26b04 - Hr Atom = 0x27002 - Href Atom = 0x27004 - Hreflang Atom = 0x27008 - Html Atom = 0x25504 - HttpEquiv Atom = 0x2780a - I Atom = 0x601 - Icon Atom = 
0x42204 - Id Atom = 0x8502 - Iframe Atom = 0x29606 - Image Atom = 0x29c05 - Img Atom = 0x2a103 - Input Atom = 0x3e805 - Inputmode Atom = 0x3e809 - Ins Atom = 0x1a803 - Isindex Atom = 0x2a907 - Ismap Atom = 0x2b005 - Itemid Atom = 0x33c06 - Itemprop Atom = 0x3c908 - Itemref Atom = 0x5ad07 - Itemscope Atom = 0x2b909 - Itemtype Atom = 0x2c308 - Kbd Atom = 0x1903 - Keygen Atom = 0x3906 - Keytype Atom = 0x53707 - Kind Atom = 0x10904 - Label Atom = 0xf005 - Lang Atom = 0x27404 - Legend Atom = 0x18206 - Li Atom = 0x1202 - Link Atom = 0x12804 - List Atom = 0x44e04 - Listing Atom = 0x44e07 - Loop Atom = 0xf404 - Low Atom = 0x11f03 - Malignmark Atom = 0x100a - Manifest Atom = 0x5f108 - Map Atom = 0x2b203 - Mark Atom = 0x1604 - Marquee Atom = 0x2cb07 - Math Atom = 0x2d204 - Max Atom = 0x2e103 - Maxlength Atom = 0x2e109 - Media Atom = 0x6e05 - Mediagroup Atom = 0x6e0a - Menu Atom = 0x33804 - Menuitem Atom = 0x33808 - Meta Atom = 0x45d04 - Meter Atom = 0x24205 - Method Atom = 0x22c06 - Mglyph Atom = 0x2a206 - Mi Atom = 0x2eb02 - Min Atom = 0x2eb03 - Minlength Atom = 0x2eb09 - Mn Atom = 0x23502 - Mo Atom = 0x3ed02 - Ms Atom = 0x2bc02 - Mtext Atom = 0x2f505 - Multiple Atom = 0x30308 - Muted Atom = 0x30b05 - Name Atom = 0x6c04 - Nav Atom = 0x3e03 - Nobr Atom = 0x5704 - Noembed Atom = 0x6307 - Noframes Atom = 0x9808 - Noscript Atom = 0x3d208 - Novalidate Atom = 0x2360a - Object Atom = 0x1ec06 - Ol Atom = 0xc902 - Onabort Atom = 0x13a07 - Onafterprint Atom = 0x1c00c - Onautocomplete Atom = 0x1fa0e - Onautocompleteerror Atom = 0x1fa13 - Onbeforeprint Atom = 0x6040d - Onbeforeunload Atom = 0x4e70e - Onblur Atom = 0xaa06 - Oncancel Atom = 0xe908 - Oncanplay Atom = 0x28509 - Oncanplaythrough Atom = 0x28510 - Onchange Atom = 0x3a708 - Onclick Atom = 0x31007 - Onclose Atom = 0x31707 - Oncontextmenu Atom = 0x32f0d - Oncuechange Atom = 0x3420b - Ondblclick Atom = 0x34d0a - Ondrag Atom = 0x35706 - Ondragend Atom = 0x35709 - Ondragenter Atom = 0x3600b - Ondragleave Atom = 0x36b0b - Ondragover Atom = 0x3760a - Ondragstart Atom = 0x3800b - Ondrop Atom = 0x38f06 - Ondurationchange Atom = 0x39f10 - Onemptied Atom = 0x39609 - Onended Atom = 0x3af07 - Onerror Atom = 0x3b607 - Onfocus Atom = 0x3bd07 - Onhashchange Atom = 0x3da0c - Oninput Atom = 0x3e607 - Oninvalid Atom = 0x3f209 - Onkeydown Atom = 0x3fb09 - Onkeypress Atom = 0x4080a - Onkeyup Atom = 0x41807 - Onlanguagechange Atom = 0x43210 - Onload Atom = 0x44206 - Onloadeddata Atom = 0x4420c - Onloadedmetadata Atom = 0x45510 - Onloadstart Atom = 0x46b0b - Onmessage Atom = 0x47609 - Onmousedown Atom = 0x47f0b - Onmousemove Atom = 0x48a0b - Onmouseout Atom = 0x4950a - Onmouseover Atom = 0x4a20b - Onmouseup Atom = 0x4ad09 - Onmousewheel Atom = 0x4b60c - Onoffline Atom = 0x4c209 - Ononline Atom = 0x4cb08 - Onpagehide Atom = 0x4d30a - Onpageshow Atom = 0x4fe0a - Onpause Atom = 0x50d07 - Onplay Atom = 0x51706 - Onplaying Atom = 0x51709 - Onpopstate Atom = 0x5200a - Onprogress Atom = 0x52a0a - Onratechange Atom = 0x53e0c - Onreset Atom = 0x54a07 - Onresize Atom = 0x55108 - Onscroll Atom = 0x55f08 - Onseeked Atom = 0x56708 - Onseeking Atom = 0x56f09 - Onselect Atom = 0x57808 - Onshow Atom = 0x58206 - Onsort Atom = 0x58b06 - Onstalled Atom = 0x59509 - Onstorage Atom = 0x59e09 - Onsubmit Atom = 0x5a708 - Onsuspend Atom = 0x5bb09 - Ontimeupdate Atom = 0xdb0c - Ontoggle Atom = 0x5c408 - Onunload Atom = 0x5cc08 - Onvolumechange Atom = 0x5d40e - Onwaiting Atom = 0x5e209 - Open Atom = 0x3cf04 - Optgroup Atom = 0xf608 - Optimum Atom = 0x5eb07 - Option Atom = 0x60006 - Output Atom = 
0x49c06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x5107 - Ping Atom = 0x7704 - Placeholder Atom = 0xc30b - Plaintext Atom = 0xfd09 - Poster Atom = 0x15706 - Pre Atom = 0x25e03 - Preload Atom = 0x25e07 - Progress Atom = 0x52c08 - Prompt Atom = 0x5fa06 - Public Atom = 0x41e06 - Q Atom = 0x13101 - Radiogroup Atom = 0x30a - Readonly Atom = 0x2fb08 - Rel Atom = 0x25f03 - Required Atom = 0x1d008 - Reversed Atom = 0x5a08 - Rows Atom = 0x9204 - Rowspan Atom = 0x9207 - Rp Atom = 0x1c602 - Rt Atom = 0x13f02 - Ruby Atom = 0xaf04 - S Atom = 0x2c01 - Samp Atom = 0x4e04 - Sandbox Atom = 0xbb07 - Scope Atom = 0x2bd05 - Scoped Atom = 0x2bd06 - Script Atom = 0x3d406 - Seamless Atom = 0x31c08 - Section Atom = 0x4e207 - Select Atom = 0x57a06 - Selected Atom = 0x57a08 - Shape Atom = 0x4f905 - Size Atom = 0x55504 - Sizes Atom = 0x55505 - Small Atom = 0x18f05 - Sortable Atom = 0x58d08 - Sorted Atom = 0x19906 - Source Atom = 0x1aa06 - Spacer Atom = 0x2db06 - Span Atom = 0x9504 - Spellcheck Atom = 0x3230a - Src Atom = 0x3c303 - Srcdoc Atom = 0x3c306 - Srclang Atom = 0x41107 - Start Atom = 0x38605 - Step Atom = 0x5f704 - Strike Atom = 0x53306 - Strong Atom = 0x55906 - Style Atom = 0x61105 - Sub Atom = 0x5a903 - Summary Atom = 0x61607 - Sup Atom = 0x61d03 - Svg Atom = 0x62003 - System Atom = 0x62306 - Tabindex Atom = 0x46308 - Table Atom = 0x42d05 - Target Atom = 0x24b06 - Tbody Atom = 0x2e05 - Td Atom = 0x4702 - Template Atom = 0x62608 - Textarea Atom = 0x2f608 - Tfoot Atom = 0x8c05 - Th Atom = 0x22e02 - Thead Atom = 0x2d405 - Time Atom = 0xdd04 - Title Atom = 0xa105 - Tr Atom = 0x10502 - Track Atom = 0x10505 - Translate Atom = 0x14009 - Tt Atom = 0x5302 - Type Atom = 0x21404 - Typemustmatch Atom = 0x2140d - U Atom = 0xb01 - Ul Atom = 0x8a02 - Usemap Atom = 0x51106 - Value Atom = 0x4005 - Var Atom = 0x11503 - Video Atom = 0x28105 - Wbr Atom = 0x12103 - Width Atom = 0x50705 - Wrap Atom = 0x58704 - Xmp Atom = 0xc103 -) - -const hash0 = 0xc17da63e - -const maxAtomLen = 19 - -var table = [1 << 9]Atom{ - 0x1: 0x48a0b, // onmousemove - 0x2: 0x5e209, // onwaiting - 0x3: 0x1fa13, // onautocompleteerror - 0x4: 0x5fa06, // prompt - 0x7: 0x5eb07, // optimum - 0x8: 0x1604, // mark - 0xa: 0x5ad07, // itemref - 0xb: 0x4fe0a, // onpageshow - 0xc: 0x57a06, // select - 0xd: 0x17b09, // draggable - 0xe: 0x3e03, // nav - 0xf: 0x17507, // command - 0x11: 0xb01, // u - 0x14: 0x2d507, // headers - 0x15: 0x44a08, // datalist - 0x17: 0x4e04, // samp - 0x1a: 0x3fb09, // onkeydown - 0x1b: 0x55f08, // onscroll - 0x1c: 0x15003, // col - 0x20: 0x3c908, // itemprop - 0x21: 0x2780a, // http-equiv - 0x22: 0x61d03, // sup - 0x24: 0x1d008, // required - 0x2b: 0x25e07, // preload - 0x2c: 0x6040d, // onbeforeprint - 0x2d: 0x3600b, // ondragenter - 0x2e: 0x50902, // dt - 0x2f: 0x5a708, // onsubmit - 0x30: 0x27002, // hr - 0x31: 0x32f0d, // oncontextmenu - 0x33: 0x29c05, // image - 0x34: 0x50d07, // onpause - 0x35: 0x25906, // hgroup - 0x36: 0x7704, // ping - 0x37: 0x57808, // onselect - 0x3a: 0x11303, // div - 0x3b: 0x1fa0e, // onautocomplete - 0x40: 0x2eb02, // mi - 0x41: 0x31c08, // seamless - 0x42: 0x2807, // charset - 0x43: 0x8502, // id - 0x44: 0x5200a, // onpopstate - 0x45: 0x3ef03, // del - 0x46: 0x2cb07, // marquee - 0x47: 0x3309, // accesskey - 0x49: 0x8d06, // footer - 0x4a: 0x44e04, // list - 0x4b: 0x2b005, // ismap - 0x51: 0x33804, // menu - 0x52: 0x2f04, // body - 0x55: 0x9a08, // frameset - 0x56: 0x54a07, // onreset - 0x57: 0x12705, // blink - 0x58: 0xa105, // title - 0x59: 0x38807, // article - 0x5b: 0x22e02, // th - 
0x5d: 0x13101, // q - 0x5e: 0x3cf04, // open - 0x5f: 0x2fa04, // area - 0x61: 0x44206, // onload - 0x62: 0xda04, // font - 0x63: 0xd604, // base - 0x64: 0x16207, // colspan - 0x65: 0x53707, // keytype - 0x66: 0x11e02, // dl - 0x68: 0x1b008, // fieldset - 0x6a: 0x2eb03, // min - 0x6b: 0x11503, // var - 0x6f: 0x2d506, // header - 0x70: 0x13f02, // rt - 0x71: 0x15008, // colgroup - 0x72: 0x23502, // mn - 0x74: 0x13a07, // onabort - 0x75: 0x3906, // keygen - 0x76: 0x4c209, // onoffline - 0x77: 0x21f09, // challenge - 0x78: 0x2b203, // map - 0x7a: 0x2e902, // h4 - 0x7b: 0x3b607, // onerror - 0x7c: 0x2e109, // maxlength - 0x7d: 0x2f505, // mtext - 0x7e: 0xbb07, // sandbox - 0x7f: 0x58b06, // onsort - 0x80: 0x100a, // malignmark - 0x81: 0x45d04, // meta - 0x82: 0x7b05, // async - 0x83: 0x2a702, // h3 - 0x84: 0x26702, // dd - 0x85: 0x27004, // href - 0x86: 0x6e0a, // mediagroup - 0x87: 0x19406, // coords - 0x88: 0x41107, // srclang - 0x89: 0x34d0a, // ondblclick - 0x8a: 0x4005, // value - 0x8c: 0xe908, // oncancel - 0x8e: 0x3230a, // spellcheck - 0x8f: 0x9a05, // frame - 0x91: 0x12403, // big - 0x94: 0x1f606, // action - 0x95: 0x6903, // dir - 0x97: 0x2fb08, // readonly - 0x99: 0x42d05, // table - 0x9a: 0x61607, // summary - 0x9b: 0x12103, // wbr - 0x9c: 0x30a, // radiogroup - 0x9d: 0x6c04, // name - 0x9f: 0x62306, // system - 0xa1: 0x15d05, // color - 0xa2: 0x7f06, // canvas - 0xa3: 0x25504, // html - 0xa5: 0x56f09, // onseeking - 0xac: 0x4f905, // shape - 0xad: 0x25f03, // rel - 0xae: 0x28510, // oncanplaythrough - 0xaf: 0x3760a, // ondragover - 0xb0: 0x62608, // template - 0xb1: 0x1d80d, // foreignObject - 0xb3: 0x9204, // rows - 0xb6: 0x44e07, // listing - 0xb7: 0x49c06, // output - 0xb9: 0x3310b, // contextmenu - 0xbb: 0x11f03, // low - 0xbc: 0x1c602, // rp - 0xbd: 0x5bb09, // onsuspend - 0xbe: 0x13606, // button - 0xbf: 0x4db04, // desc - 0xc1: 0x4e207, // section - 0xc2: 0x52a0a, // onprogress - 0xc3: 0x59e09, // onstorage - 0xc4: 0x2d204, // math - 0xc5: 0x4503, // alt - 0xc7: 0x8a02, // ul - 0xc8: 0x5107, // pattern - 0xc9: 0x4b60c, // onmousewheel - 0xca: 0x35709, // ondragend - 0xcb: 0xaf04, // ruby - 0xcc: 0xc01, // p - 0xcd: 0x31707, // onclose - 0xce: 0x24205, // meter - 0xcf: 0x11807, // bgsound - 0xd2: 0x25106, // height - 0xd4: 0x101, // b - 0xd5: 0x2c308, // itemtype - 0xd8: 0x1bb07, // caption - 0xd9: 0x10c08, // disabled - 0xdb: 0x33808, // menuitem - 0xdc: 0x62003, // svg - 0xdd: 0x18f05, // small - 0xde: 0x44a04, // data - 0xe0: 0x4cb08, // ononline - 0xe1: 0x2a206, // mglyph - 0xe3: 0x6505, // embed - 0xe4: 0x10502, // tr - 0xe5: 0x46b0b, // onloadstart - 0xe7: 0x3c306, // srcdoc - 0xeb: 0x5c408, // ontoggle - 0xed: 0xe703, // bdo - 0xee: 0x4702, // td - 0xef: 0x8305, // aside - 0xf0: 0x29402, // h2 - 0xf1: 0x52c08, // progress - 0xf2: 0x12c0a, // blockquote - 0xf4: 0xf005, // label - 0xf5: 0x601, // i - 0xf7: 0x9207, // rowspan - 0xfb: 0x51709, // onplaying - 0xfd: 0x2a103, // img - 0xfe: 0xf608, // optgroup - 0xff: 0x42307, // content - 0x101: 0x53e0c, // onratechange - 0x103: 0x3da0c, // onhashchange - 0x104: 0x4807, // details - 0x106: 0x40008, // download - 0x109: 0x14009, // translate - 0x10b: 0x4230f, // contenteditable - 0x10d: 0x36b0b, // ondragleave - 0x10e: 0x2106, // accept - 0x10f: 0x57a08, // selected - 0x112: 0x1f20a, // formaction - 0x113: 0x5b506, // center - 0x115: 0x45510, // onloadedmetadata - 0x116: 0x12804, // link - 0x117: 0xdd04, // time - 0x118: 0x19f0b, // crossorigin - 0x119: 0x3bd07, // onfocus - 0x11a: 0x58704, // wrap - 0x11b: 0x42204, // icon - 
0x11d: 0x28105, // video - 0x11e: 0x4de05, // class - 0x121: 0x5d40e, // onvolumechange - 0x122: 0xaa06, // onblur - 0x123: 0x2b909, // itemscope - 0x124: 0x61105, // style - 0x127: 0x41e06, // public - 0x129: 0x2320e, // formnovalidate - 0x12a: 0x58206, // onshow - 0x12c: 0x51706, // onplay - 0x12d: 0x3c804, // cite - 0x12e: 0x2bc02, // ms - 0x12f: 0xdb0c, // ontimeupdate - 0x130: 0x10904, // kind - 0x131: 0x2470a, // formtarget - 0x135: 0x3af07, // onended - 0x136: 0x26506, // hidden - 0x137: 0x2c01, // s - 0x139: 0x2280a, // formmethod - 0x13a: 0x3e805, // input - 0x13c: 0x50b02, // h6 - 0x13d: 0xc902, // ol - 0x13e: 0x3420b, // oncuechange - 0x13f: 0x1e50d, // foreignobject - 0x143: 0x4e70e, // onbeforeunload - 0x144: 0x2bd05, // scope - 0x145: 0x39609, // onemptied - 0x146: 0x14b05, // defer - 0x147: 0xc103, // xmp - 0x148: 0x39f10, // ondurationchange - 0x149: 0x1903, // kbd - 0x14c: 0x47609, // onmessage - 0x14d: 0x60006, // option - 0x14e: 0x2eb09, // minlength - 0x14f: 0x32807, // checked - 0x150: 0xce08, // autoplay - 0x152: 0x202, // br - 0x153: 0x2360a, // novalidate - 0x156: 0x6307, // noembed - 0x159: 0x31007, // onclick - 0x15a: 0x47f0b, // onmousedown - 0x15b: 0x3a708, // onchange - 0x15e: 0x3f209, // oninvalid - 0x15f: 0x2bd06, // scoped - 0x160: 0x18808, // controls - 0x161: 0x30b05, // muted - 0x162: 0x58d08, // sortable - 0x163: 0x51106, // usemap - 0x164: 0x1b80a, // figcaption - 0x165: 0x35706, // ondrag - 0x166: 0x26b04, // high - 0x168: 0x3c303, // src - 0x169: 0x15706, // poster - 0x16b: 0x1670e, // annotation-xml - 0x16c: 0x5f704, // step - 0x16d: 0x4, // abbr - 0x16e: 0x1b06, // dialog - 0x170: 0x1202, // li - 0x172: 0x3ed02, // mo - 0x175: 0x1d803, // for - 0x176: 0x1a803, // ins - 0x178: 0x55504, // size - 0x179: 0x43210, // onlanguagechange - 0x17a: 0x8607, // default - 0x17b: 0x1a03, // bdi - 0x17c: 0x4d30a, // onpagehide - 0x17d: 0x6907, // dirname - 0x17e: 0x21404, // type - 0x17f: 0x1f204, // form - 0x181: 0x28509, // oncanplay - 0x182: 0x6103, // dfn - 0x183: 0x46308, // tabindex - 0x186: 0x6502, // em - 0x187: 0x27404, // lang - 0x189: 0x39108, // dropzone - 0x18a: 0x4080a, // onkeypress - 0x18b: 0x23c08, // datetime - 0x18c: 0x16204, // cols - 0x18d: 0x1, // a - 0x18e: 0x4420c, // onloadeddata - 0x190: 0xa605, // audio - 0x192: 0x2e05, // tbody - 0x193: 0x22c06, // method - 0x195: 0xf404, // loop - 0x196: 0x29606, // iframe - 0x198: 0x2d504, // head - 0x19e: 0x5f108, // manifest - 0x19f: 0xb309, // autofocus - 0x1a0: 0x14904, // code - 0x1a1: 0x55906, // strong - 0x1a2: 0x30308, // multiple - 0x1a3: 0xc05, // param - 0x1a6: 0x21107, // enctype - 0x1a7: 0x5b304, // face - 0x1a8: 0xfd09, // plaintext - 0x1a9: 0x26e02, // h1 - 0x1aa: 0x59509, // onstalled - 0x1ad: 0x3d406, // script - 0x1ae: 0x2db06, // spacer - 0x1af: 0x55108, // onresize - 0x1b0: 0x4a20b, // onmouseover - 0x1b1: 0x5cc08, // onunload - 0x1b2: 0x56708, // onseeked - 0x1b4: 0x2140d, // typemustmatch - 0x1b5: 0x1cc06, // figure - 0x1b6: 0x4950a, // onmouseout - 0x1b7: 0x25e03, // pre - 0x1b8: 0x50705, // width - 0x1b9: 0x19906, // sorted - 0x1bb: 0x5704, // nobr - 0x1be: 0x5302, // tt - 0x1bf: 0x1105, // align - 0x1c0: 0x3e607, // oninput - 0x1c3: 0x41807, // onkeyup - 0x1c6: 0x1c00c, // onafterprint - 0x1c7: 0x210e, // accept-charset - 0x1c8: 0x33c06, // itemid - 0x1c9: 0x3e809, // inputmode - 0x1cb: 0x53306, // strike - 0x1cc: 0x5a903, // sub - 0x1cd: 0x10505, // track - 0x1ce: 0x38605, // start - 0x1d0: 0xd608, // basefont - 0x1d6: 0x1aa06, // source - 0x1d7: 0x18206, // legend - 0x1d8: 
0x2d405, // thead - 0x1da: 0x8c05, // tfoot - 0x1dd: 0x1ec06, // object - 0x1de: 0x6e05, // media - 0x1df: 0x1670a, // annotation - 0x1e0: 0x20d0b, // formenctype - 0x1e2: 0x3d208, // noscript - 0x1e4: 0x55505, // sizes - 0x1e5: 0x1fc0c, // autocomplete - 0x1e6: 0x9504, // span - 0x1e7: 0x9808, // noframes - 0x1e8: 0x24b06, // target - 0x1e9: 0x38f06, // ondrop - 0x1ea: 0x2b306, // applet - 0x1ec: 0x5a08, // reversed - 0x1f0: 0x2a907, // isindex - 0x1f3: 0x27008, // hreflang - 0x1f5: 0x2f302, // h5 - 0x1f6: 0x4f307, // address - 0x1fa: 0x2e103, // max - 0x1fb: 0xc30b, // placeholder - 0x1fc: 0x2f608, // textarea - 0x1fe: 0x4ad09, // onmouseup - 0x1ff: 0x3800b, // ondragstart -} - -const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + - "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + - "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + - "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + - "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + - "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + - "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + - "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + - "bjectforeignobjectformactionautocompleteerrorformenctypemust" + - "matchallengeformmethodformnovalidatetimeterformtargetheightm" + - "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + - "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + - "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + - "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + - "hangeondblclickondragendondragenterondragleaveondragoverondr" + - "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + - "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + - "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + - "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + - "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + - "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + - "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + - "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + - "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + - "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + - "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + - "mmarysupsvgsystemplate"
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
deleted file mode 100644
index 52f651f..0000000
--- a/vendor/golang.org/x/net/html/const.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-// Section 12.2.3.2 of the HTML5 specification says "The following elements
-// have varying levels of special parsing rules".
-// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "isindex": true, - "li": true, - "link": true, - "listing": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -}
-
-func isSpecialElement(element *Node) bool {
-	switch element.Namespace {
-	case "", "html":
-		return isSpecialElementMap[element.Data]
-	case "svg":
-		return element.Data == "foreignObject"
-	}
-	return false
-}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
deleted file mode 100644
index 94f4968..0000000
--- a/vendor/golang.org/x/net/html/doc.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package html implements an HTML5-compliant tokenizer and parser.
-
-Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
-caller's responsibility to ensure that r provides UTF-8 encoded HTML.
-
-	z := html.NewTokenizer(r)
-
-Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
-which parses the next token and returns its type, or an error:
-
-	for {
-		tt := z.Next()
-		if tt == html.ErrorToken {
-			// ...
-			return ...
-		}
-		// Process the current token.
-	}
-
-There are two APIs for retrieving the current token. The high-level API is to
-call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
-allow optionally calling Raw after Next but before Token, Text, TagName, or
-TagAttr. In EBNF notation, the valid call sequence per token is:
-
-	Next {Raw} [ Token | Text | TagName {TagAttr} ]
-
-Token returns an independent data structure that completely describes a token.
-Entities (such as "&lt;") are unescaped, tag names and attribute keys are
-lower-cased, and attributes are collected into a []Attribute. For example:
-
-	for {
-		if z.Next() == html.ErrorToken {
-			// Returning io.EOF indicates success.
-			return z.Err()
-		}
-		emitToken(z.Token())
-	}
-
-The low-level API performs fewer allocations and copies, but the contents of
-the []byte values returned by Text, TagName and TagAttr may change on the next
-call to Next.
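The byte-reuse caveat above is the main pitfall of the low-level API. Below is a minimal, self-contained sketch of the copy-before-Next pattern, written against the same golang.org/x/net/html API that this deleted doc comment describes; the input string and variable names are illustrative assumptions, not part of the vendored file. The file's own low-level example follows after the sketch.

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/net/html"
	)

	func main() {
		z := html.NewTokenizer(strings.NewReader("<p>Hello, <b>world</b>!</p>"))
		var texts [][]byte
		for {
			tt := z.Next()
			if tt == html.ErrorToken {
				// z.Err() returns io.EOF once the input is exhausted.
				break
			}
			if tt == html.TextToken {
				// z.Text() returns a slice into the tokenizer's internal
				// buffer; copy it before the next call to Next reuses it.
				texts = append(texts, append([]byte(nil), z.Text()...))
			}
		}
		for _, t := range texts {
			fmt.Printf("%q\n", t) // "Hello, ", then "world", then "!"
		}
	}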
-For example, to extract an HTML page's anchor text:
-
-	depth := 0
-	for {
-		tt := z.Next()
-		switch tt {
-		case ErrorToken:
-			return z.Err()
-		case TextToken:
-			if depth > 0 {
-				// emitBytes should copy the []byte it receives,
-				// if it doesn't process it immediately.
-				emitBytes(z.Text())
-			}
-		case StartTagToken, EndTagToken:
-			tn, _ := z.TagName()
-			if len(tn) == 1 && tn[0] == 'a' {
-				if tt == StartTagToken {
-					depth++
-				} else {
-					depth--
-				}
-			}
-		}
-	}
-
-Parsing is done by calling Parse with an io.Reader, which returns the root of
-the parse tree (the document element) as a *Node. It is the caller's
-responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
-example, to process each anchor node in depth-first order:
-
-	doc, err := html.Parse(r)
-	if err != nil {
-		// ...
-	}
-	var f func(*html.Node)
-	f = func(n *html.Node) {
-		if n.Type == html.ElementNode && n.Data == "a" {
-			// Do something with n...
-		}
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			f(c)
-		}
-	}
-	f(doc)
-
-The relevant specifications include:
-https://html.spec.whatwg.org/multipage/syntax.html and
-https://html.spec.whatwg.org/multipage/syntax.html#tokenization
-*/
-package html // import "golang.org/x/net/html"
-
-// The tokenization algorithm implemented by this package is not a line-by-line
-// transliteration of the relatively verbose state-machine in the WHATWG
-// specification. A more direct approach is used instead, where the program
-// counter implies the state, such as whether it is tokenizing a tag or a text
-// node. Specification compliance is verified by checking expected and actual
-// outputs over a test suite rather than aiming for algorithmic fidelity.
-
-// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
-// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
deleted file mode 100644
index c484e5a..0000000
--- a/vendor/golang.org/x/net/html/doctype.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"strings"
-)
-
-// parseDoctype parses the data from a DoctypeToken into a name,
-// public identifier, and system identifier. It returns a Node whose Type
-// is DoctypeNode, whose Data is the name, and which has attributes
-// named "system" and "public" for the two identifiers if they were present.
-// quirks is whether the document should be parsed in "quirks mode".
-func parseDoctype(s string) (n *Node, quirks bool) {
-	n = &Node{Type: DoctypeNode}
-
-	// Find the name.
-	space := strings.IndexAny(s, whitespace)
-	if space == -1 {
-		space = len(s)
-	}
-	n.Data = s[:space]
-	// The comparison to "html" is case-sensitive.
-	if n.Data != "html" {
-		quirks = true
-	}
-	n.Data = strings.ToLower(n.Data)
-	s = strings.TrimLeft(s[space:], whitespace)
-
-	if len(s) < 6 {
-		// It can't start with "PUBLIC" or "SYSTEM".
-		// Ignore the rest of the string.
-		return n, quirks || s != ""
-	}
-
-	key := strings.ToLower(s[:6])
-	s = s[6:]
-	for key == "public" || key == "system" {
-		s = strings.TrimLeft(s, whitespace)
-		if s == "" {
-			break
-		}
-		quote := s[0]
-		if quote != '"' && quote != '\'' {
-			break
-		}
-		s = s[1:]
-		q := strings.IndexRune(s, rune(quote))
-		var id string
-		if q == -1 {
-			id = s
-			s = ""
-		} else {
-			id = s[:q]
-			s = s[q+1:]
-		}
-		n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
-		if key == "public" {
-			key = "system"
-		} else {
-			key = ""
-		}
-	}
-
-	if key != "" || s != "" {
-		quirks = true
-	} else if len(n.Attr) > 0 {
-		if n.Attr[0].Key == "public" {
-			public := strings.ToLower(n.Attr[0].Val)
-			switch public {
-			case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
-				quirks = true
-			default:
-				for _, q := range quirkyIDs {
-					if strings.HasPrefix(public, q) {
-						quirks = true
-						break
-					}
-				}
-			}
-			// The following two public IDs only cause quirks mode if there is no system ID.
-			if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
-				strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
-				quirks = true
-			}
-		}
-		if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
-			strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
-			quirks = true
-		}
-	}
-
-	return n, quirks
-}
-
-// quirkyIDs is a list of public doctype identifiers that cause a document
-// to be interpreted in quirks mode. The identifiers should be in lower case.
-var quirkyIDs = []string{
- "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. 
corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index a50c04c..0000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. -// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - 
"Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', 
- "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": 
'\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": 
'\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": 
'\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', 
- "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": 
'\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - 
"emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - 
"harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - 
"ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - 
"mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": 
'\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', 
- "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": 
'\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": 
'\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - 
"xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', 
- "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. - // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', '\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": 
{'\u2282', '\u20D2'},
-	"vnsup;": {'\u2283', '\u20D2'},
-	"vsubnE;": {'\u2ACB', '\uFE00'},
-	"vsubne;": {'\u228A', '\uFE00'},
-	"vsupnE;": {'\u2ACC', '\uFE00'},
-	"vsupne;": {'\u228B', '\uFE00'},
-}
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
deleted file mode 100644
index d856139..0000000
--- a/vendor/golang.org/x/net/html/escape.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"bytes"
-	"strings"
-	"unicode/utf8"
-)
-
-// These replacements permit compatibility with old numeric entities that
-// assumed Windows-1252 encoding.
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-var replacementTable = [...]rune{
-	'\u20AC', // First entry is what 0x80 should be replaced with.
-	'\u0081',
-	'\u201A',
-	'\u0192',
-	'\u201E',
-	'\u2026',
-	'\u2020',
-	'\u2021',
-	'\u02C6',
-	'\u2030',
-	'\u0160',
-	'\u2039',
-	'\u0152',
-	'\u008D',
-	'\u017D',
-	'\u008F',
-	'\u0090',
-	'\u2018',
-	'\u2019',
-	'\u201C',
-	'\u201D',
-	'\u2022',
-	'\u2013',
-	'\u2014',
-	'\u02DC',
-	'\u2122',
-	'\u0161',
-	'\u203A',
-	'\u0153',
-	'\u009D',
-	'\u017E',
-	'\u0178', // Last entry is 0x9F.
-	// 0x00->'\uFFFD' is handled programmatically.
-	// 0x0D->'\u000D' is a no-op.
-}
-
-// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
-// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
-// Precondition: b[src] == '&' && dst <= src.
-// attribute should be true if parsing an attribute value.
-func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
-	// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-
-	// i starts at 1 because we already know that s[0] == '&'.
-	i, s := 1, b[src:]
-
-	if len(s) <= 1 {
-		b[dst] = b[src]
-		return dst + 1, src + 1
-	}
-
-	if s[i] == '#' {
-		if len(s) <= 3 { // We need to have at least "&#.".
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-		i++
-		c := s[i]
-		hex := false
-		if c == 'x' || c == 'X' {
-			hex = true
-			i++
-		}
-
-		x := '\x00'
-		for i < len(s) {
-			c = s[i]
-			i++
-			if hex {
-				if '0' <= c && c <= '9' {
-					x = 16*x + rune(c) - '0'
-					continue
-				} else if 'a' <= c && c <= 'f' {
-					x = 16*x + rune(c) - 'a' + 10
-					continue
-				} else if 'A' <= c && c <= 'F' {
-					x = 16*x + rune(c) - 'A' + 10
-					continue
-				}
-			} else if '0' <= c && c <= '9' {
-				x = 10*x + rune(c) - '0'
-				continue
-			}
-			if c != ';' {
-				i--
-			}
-			break
-		}
-
-		if i <= 3 { // No characters matched.
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-
-		if 0x80 <= x && x <= 0x9F {
-			// Replace characters from Windows-1252 with UTF-8 equivalents.
-			x = replacementTable[x-0x80]
-		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
-			// Replace invalid characters with the replacement character.
-			x = '\uFFFD'
-		}
-
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	}
-
-	// Consume the maximum number of characters possible, with the
-	// consumed characters matching one of the named references.
-
-	for i < len(s) {
-		c := s[i]
-		i++
-		// Lower-cased characters are more common in entities, so we check for them first.
-		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
-			continue
-		}
-		if c != ';' {
-			i--
-		}
-		break
-	}
-
-	entityName := string(s[1:i])
-	if entityName == "" {
-		// No-op.
-	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
-		// No-op.
-	} else if x := entity[entityName]; x != 0 {
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	} else if x := entity2[entityName]; x[0] != 0 {
-		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
-		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
-	} else if !attribute {
-		maxLen := len(entityName) - 1
-		if maxLen > longestEntityWithoutSemicolon {
-			maxLen = longestEntityWithoutSemicolon
-		}
-		for j := maxLen; j > 1; j-- {
-			if x := entity[entityName[:j]]; x != 0 {
-				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
-			}
-		}
-	}
-
-	dst1, src1 = dst+i, src+i
-	copy(b[dst:dst1], b[src:src1])
-	return dst1, src1
-}
-
-// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
-// attribute should be true if parsing an attribute value.
-func unescape(b []byte, attribute bool) []byte {
-	for i, c := range b {
-		if c == '&' {
-			dst, src := unescapeEntity(b, i, i, attribute)
-			for src < len(b) {
-				c := b[src]
-				if c == '&' {
-					dst, src = unescapeEntity(b, dst, src, attribute)
-				} else {
-					b[dst] = c
-					dst, src = dst+1, src+1
-				}
-			}
-			return b[0:dst]
-		}
-	}
-	return b
-}
-
-// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
-func lower(b []byte) []byte {
-	for i, c := range b {
-		if 'A' <= c && c <= 'Z' {
-			b[i] = c + 'a' - 'A'
-		}
-	}
-	return b
-}
-
-const escapedChars = "&'<>\"\r"
-
-func escape(w writer, s string) error {
-	i := strings.IndexAny(s, escapedChars)
-	for i != -1 {
-		if _, err := w.WriteString(s[:i]); err != nil {
-			return err
-		}
-		var esc string
-		switch s[i] {
-		case '&':
-			esc = "&amp;"
-		case '\'':
-			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
-			esc = "&#39;"
-		case '<':
-			esc = "&lt;"
-		case '>':
-			esc = "&gt;"
-		case '"':
-			// "&#34;" is shorter than "&quot;".
-			esc = "&#34;"
-		case '\r':
-			esc = "&#13;"
-		default:
-			panic("unrecognized escape character")
-		}
-		s = s[i+1:]
-		if _, err := w.WriteString(esc); err != nil {
-			return err
-		}
-		i = strings.IndexAny(s, escapedChars)
-	}
-	_, err := w.WriteString(s)
-	return err
-}
-
-// EscapeString escapes special characters like "<" to become "&lt;". It
-// escapes only five such characters: <, >, &, ' and ".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func EscapeString(s string) string {
-	if strings.IndexAny(s, escapedChars) == -1 {
-		return s
-	}
-	var buf bytes.Buffer
-	escape(&buf, s)
-	return buf.String()
-}
-
-// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
-// larger range of entities than EscapeString escapes. For example, "&aacute;"
-// unescapes to "á", as does "&#225;" and "&#xE1;".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func UnescapeString(s string) string {
-	for _, c := range s {
-		if c == '&' {
-			return string(unescape([]byte(s), false))
-		}
-	}
-	return s
-}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
deleted file mode 100644
index d3b3844..0000000
--- a/vendor/golang.org/x/net/html/foreign.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
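The EscapeString/UnescapeString contract documented in the deleted escape.go above is easy to exercise through the package's exported API, which is unchanged in upstream golang.org/x/net/html. A minimal sketch (the two-codepoint entity2 table also shows up here):

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	s := `a<b & "c"`
	esc := html.EscapeString(s)
	fmt.Println(esc)                           // a&lt;b &amp; &#34;c&#34;
	fmt.Println(html.UnescapeString(esc) == s) // true

	// The converse does not hold: UnescapeString understands far more
	// entities than EscapeString ever emits, including the
	// two-codepoint replacements from entity2.
	fmt.Println(html.UnescapeString("&aacute;"))        // á
	fmt.Println(html.UnescapeString("&NotEqualTilde;")) // ≂̸ (U+2242 U+0338)
}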
- -package html - -import ( - "strings" -) - -func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { - for i := range aa { - if newName, ok := nameMap[aa[i].Key]; ok { - aa[i].Key = newName - } - } -} - -func adjustForeignAttributes(aa []Attribute) { - for i, a := range aa { - if a.Key == "" || a.Key[0] != 'x' { - continue - } - switch a.Key { - case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", - "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": - j := strings.Index(a.Key, ":") - aa[i].Namespace = a.Key[:j] - aa[i].Key = a.Key[j+1:] - } - } -} - -func htmlIntegrationPoint(n *Node) bool { - if n.Type != ElementNode { - return false - } - switch n.Namespace { - case "math": - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { - return true - } - } - } - } - case "svg": - switch n.Data { - case "desc", "foreignObject", "title": - return true - } - } - return false -} - -func mathMLTextIntegrationPoint(n *Node) bool { - if n.Namespace != "math" { - return false - } - switch n.Data { - case "mi", "mo", "mn", "ms", "mtext": - return true - } - return false -} - -// Section 12.2.5.5. -var breakout = map[string]bool{ - "b": true, - "big": true, - "blockquote": true, - "body": true, - "br": true, - "center": true, - "code": true, - "dd": true, - "div": true, - "dl": true, - "dt": true, - "em": true, - "embed": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "hr": true, - "i": true, - "img": true, - "li": true, - "listing": true, - "menu": true, - "meta": true, - "nobr": true, - "ol": true, - "p": true, - "pre": true, - "ruby": true, - "s": true, - "small": true, - "span": true, - "strong": true, - "strike": true, - "sub": true, - "sup": true, - "table": true, - "tt": true, - "u": true, - "ul": true, - "var": true, -} - -// Section 12.2.5.5. 
-var svgTagNameAdjustments = map[string]string{ - "altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath", -} - -// Section 12.2.5.1 -var mathMLAttributeAdjustments = map[string]string{ - "definitionurl": "definitionURL", -} - -var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", -} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
deleted file mode 100644
index 26b657a..0000000
--- a/vendor/golang.org/x/net/html/node.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"golang.org/x/net/html/atom"
-)
-
-// A NodeType is the type of a Node.
-type NodeType uint32
-
-const (
-	ErrorNode NodeType = iota
-	TextNode
-	DocumentNode
-	ElementNode
-	CommentNode
-	DoctypeNode
-	scopeMarkerNode
-)
-
-// Section 12.2.3.3 says "scope markers are inserted when entering applet
-// elements, buttons, object elements, marquees, table cells, and table
-// captions, and are used to prevent formatting from 'leaking'".
-var scopeMarker = Node{Type: scopeMarkerNode}
-
-// A Node consists of a NodeType and some Data (tag name for element nodes,
-// content for text) and are part of a tree of Nodes. Element nodes may also
-// have a Namespace and contain a slice of Attributes. Data is unescaped, so
-// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
-// is the atom for Data, or zero if Data is not a known tag name.
-//
-// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
-// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
-// "svg" is short for "http://www.w3.org/2000/svg".
-type Node struct {
-	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
-
-	Type      NodeType
-	DataAtom  atom.Atom
-	Data      string
-	Namespace string
-	Attr      []Attribute
-}
-
-// InsertBefore inserts newChild as a child of n, immediately before oldChild
-// in the sequence of n's children. oldChild may be nil, in which case newChild
-// is appended to the end of n's children.
-//
-// It will panic if newChild already has a parent or siblings.
-func (n *Node) InsertBefore(newChild, oldChild *Node) {
-	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
-		panic("html: InsertBefore called for an attached child Node")
-	}
-	var prev, next *Node
-	if oldChild != nil {
-		prev, next = oldChild.PrevSibling, oldChild
-	} else {
-		prev = n.LastChild
-	}
-	if prev != nil {
-		prev.NextSibling = newChild
-	} else {
-		n.FirstChild = newChild
-	}
-	if next != nil {
-		next.PrevSibling = newChild
-	} else {
-		n.LastChild = newChild
-	}
-	newChild.Parent = n
-	newChild.PrevSibling = prev
-	newChild.NextSibling = next
-}
-
-// AppendChild adds a node c as a child of n.
-//
-// It will panic if c already has a parent or siblings.
-func (n *Node) AppendChild(c *Node) {
-	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
-		panic("html: AppendChild called for an attached child Node")
-	}
-	last := n.LastChild
-	if last != nil {
-		last.NextSibling = c
-	} else {
-		n.FirstChild = c
-	}
-	n.LastChild = c
-	c.Parent = n
-	c.PrevSibling = last
-}
-
-// RemoveChild removes a node c that is a child of n. Afterwards, c will have
-// no parent and no siblings.
-//
-// It will panic if c's parent is not n.
-func (n *Node) RemoveChild(c *Node) {
-	if c.Parent != n {
-		panic("html: RemoveChild called for a non-child Node")
-	}
-	if n.FirstChild == c {
-		n.FirstChild = c.NextSibling
-	}
-	if c.NextSibling != nil {
-		c.NextSibling.PrevSibling = c.PrevSibling
-	}
-	if n.LastChild == c {
-		n.LastChild = c.PrevSibling
-	}
-	if c.PrevSibling != nil {
-		c.PrevSibling.NextSibling = c.NextSibling
-	}
-	c.Parent = nil
-	c.PrevSibling = nil
-	c.NextSibling = nil
-}
-
-// reparentChildren reparents all of src's child nodes to dst.
-func reparentChildren(dst, src *Node) {
-	for {
-		child := src.FirstChild
-		if child == nil {
-			break
-		}
-		src.RemoveChild(child)
-		dst.AppendChild(child)
-	}
-}
-
-// clone returns a new node with the same type, data and attributes.
-// The clone has no parent, no siblings and no children.
-func (n *Node) clone() *Node {
-	m := &Node{
-		Type:     n.Type,
-		DataAtom: n.DataAtom,
-		Data:     n.Data,
-		Attr:     make([]Attribute, len(n.Attr)),
-	}
-	copy(m.Attr, n.Attr)
-	return m
-}
-
-// nodeStack is a stack of nodes.
-type nodeStack []*Node
-
-// pop pops the stack. It will panic if s is empty.
-func (s *nodeStack) pop() *Node {
-	i := len(*s)
-	n := (*s)[i-1]
-	*s = (*s)[:i-1]
-	return n
-}
-
-// top returns the most recently pushed node, or nil if s is empty.
-func (s *nodeStack) top() *Node {
-	if i := len(*s); i > 0 {
-		return (*s)[i-1]
-	}
-	return nil
-}
-
-// index returns the index of the top-most occurrence of n in the stack, or -1
-// if n is not present.
-func (s *nodeStack) index(n *Node) int {
-	for i := len(*s) - 1; i >= 0; i-- {
-		if (*s)[i] == n {
-			return i
-		}
-	}
-	return -1
-}
-
-// insert inserts a node at the given index.
-func (s *nodeStack) insert(i int, n *Node) {
-	(*s) = append(*s, nil)
-	copy((*s)[i+1:], (*s)[i:])
-	(*s)[i] = n
-}
-
-// remove removes a node from the stack. It is a no-op if n is not present.
-func (s *nodeStack) remove(n *Node) {
-	i := s.index(n)
-	if i == -1 {
-		return
-	}
-	copy((*s)[i:], (*s)[i+1:])
-	j := len(*s) - 1
-	(*s)[j] = nil
-	*s = (*s)[:j]
-}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
deleted file mode 100644
index be4b2bf..0000000
--- a/vendor/golang.org/x/net/html/parse.go
+++ /dev/null
@@ -1,2094 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-
-	a "golang.org/x/net/html/atom"
-)
-
-// A parser implements the HTML5 parsing algorithm:
-// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
-type parser struct {
-	// tokenizer provides the tokens for the parser.
-	tokenizer *Tokenizer
-	// tok is the most recently read token.
-	tok Token
-	// Self-closing tags like <br/> are treated as start tags, except that
-	// hasSelfClosingToken is set while they are being processed.
-	hasSelfClosingToken bool
-	// doc is the document root element.
-	doc *Node
-	// The stack of open elements (section 12.2.3.2) and active formatting
-	// elements (section 12.2.3.3).
-	oe, afe nodeStack
-	// Element pointers (section 12.2.3.4).
-	head, form *Node
-	// Other parsing state flags (section 12.2.3.5).
-	scripting, framesetOK bool
-	// im is the current insertion mode.
-	im insertionMode
-	// originalIM is the insertion mode to go back to after completing a text
-	// or inTableText insertion mode.
-	originalIM insertionMode
-	// fosterParenting is whether new elements should be inserted according to
-	// the foster parenting rules (section 12.2.5.3).
-	fosterParenting bool
-	// quirks is whether the parser is operating in "quirks mode."
-	quirks bool
-	// fragment is whether the parser is parsing an HTML fragment.
-	fragment bool
-	// context is the context element when parsing an HTML fragment
-	// (section 12.4).
-	context *Node
-}
-
-func (p *parser) top() *Node {
-	if n := p.oe.top(); n != nil {
-		return n
-	}
-	return p.doc
-}
-
-// Stop tags for use in popUntil. These come from section 12.2.3.2.
-var (
-	defaultScopeStopTags = map[string][]a.Atom{
-		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
-		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
-		"svg":  {a.Desc, a.ForeignObject, a.Title},
-	}
-)
-
-type scope int
-
-const (
-	defaultScope scope = iota
-	listItemScope
-	buttonScope
-	tableScope
-	tableRowScope
-	tableBodyScope
-	selectScope
-)
-
-// popUntil pops the stack of open elements at the highest element whose tag
-// is in matchTags, provided there is no higher element in the scope's stop
-// tags (as defined in section 12.2.3.2). It returns whether or not there was
-// such an element. If there was not, popUntil leaves the stack unchanged.
-//
-// For example, the set of stop tags for table scope is: "html", "table". If
-// the stack was:
-// ["html", "body", "font", "table", "b", "i", "u"]
-// then popUntil(tableScope, "font") would return false, but
-// popUntil(tableScope, "i") would return true and the stack would become:
-// ["html", "body", "font", "table", "b"]
-//
-// If an element's tag is in both the stop tags and matchTags, then the stack
-// will be popped and the function returns true (provided, of course, there was
-// no higher element in the stack that was also in the stop tags). For example,
-// popUntil(tableScope, "table") returns true and leaves:
-// ["html", "body", "font"]
-func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
-	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
-		p.oe = p.oe[:i]
-		return true
-	}
-	return false
-}
-
-// indexOfElementInScope returns the index in p.oe of the highest element whose
-// tag is in matchTags that is in scope. If no matching element is in scope, it
-// returns -1.
-func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		tagAtom := p.oe[i].DataAtom
-		if p.oe[i].Namespace == "" {
-			for _, t := range matchTags {
-				if t == tagAtom {
-					return i
-				}
-			}
-			switch s {
-			case defaultScope:
-				// No-op.
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. -// If exceptions are specified, nodes with that name will not be popped off. -func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.5.3, "foster parenting". -func (p *parser) fosterParent(n *Node) { - var table, parent, prev *Node - var i int - for i = len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == a.Table { - table = p.oe[i] - break - } - } - - if table == nil { - // The foster parent is the html element. - parent = p.oe[0] - } else { - parent = table.Parent - } - if parent == nil { - parent = p.oe[i-1] - } - - if table != nil { - prev = table.PrevSibling - } else { - prev = parent.LastChild - } - if prev != nil && prev.Type == TextNode && n.Type == TextNode { - prev.Data += n.Data - return - } - - parent.InsertBefore(n, table) -} - -// addText adds text to the preceding node if it is a text node, or else it -// calls addChild with a new text node. 
-func (p *parser) addText(text string) { - if text == "" { - return - } - - if p.shouldFosterParent() { - p.fosterParent(&Node{ - Type: TextNode, - Data: text, - }) - return - } - - t := p.top() - if n := t.LastChild; n != nil && n.Type == TextNode { - n.Data += text - return - } - p.addChild(&Node{ - Type: TextNode, - Data: text, - }) -} - -// addElement adds a child element based on the current token. -func (p *parser) addElement() { - p.addChild(&Node{ - Type: ElementNode, - DataAtom: p.tok.DataAtom, - Data: p.tok.Data, - Attr: p.tok.Attr, - }) -} - -// Section 12.2.3.3. -func (p *parser) addFormattingElement() { - tagAtom, attr := p.tok.DataAtom, p.tok.Attr - p.addElement() - - // Implement the Noah's Ark clause, but with three per family instead of two. - identicalElements := 0 -findIdenticalElements: - for i := len(p.afe) - 1; i >= 0; i-- { - n := p.afe[i] - if n.Type == scopeMarkerNode { - break - } - if n.Type != ElementNode { - continue - } - if n.Namespace != "" { - continue - } - if n.DataAtom != tagAtom { - continue - } - if len(n.Attr) != len(attr) { - continue - } - compareAttributes: - for _, t0 := range n.Attr { - for _, t1 := range attr { - if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { - // Found a match for this attribute, continue with the next attribute. - continue compareAttributes - } - } - // If we get here, there is no attribute that matches a. - // Therefore the element is not identical to the new one. - continue findIdenticalElements - } - - identicalElements++ - if identicalElements >= 3 { - p.afe.remove(n) - } - } - - p.afe = append(p.afe, p.top()) -} - -// Section 12.2.3.3. -func (p *parser) clearActiveFormattingElements() { - for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { - return - } - } -} - -// Section 12.2.3.3. -func (p *parser) reconstructActiveFormattingElements() { - n := p.afe.top() - if n == nil { - return - } - if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { - return - } - i := len(p.afe) - 1 - for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { - if i == 0 { - i = -1 - break - } - i-- - n = p.afe[i] - } - for { - i++ - clone := p.afe[i].clone() - p.addChild(clone) - p.afe[i] = clone - if i == len(p.afe)-1 { - break - } - } -} - -// Section 12.2.4. -func (p *parser) acknowledgeSelfClosingTag() { - p.hasSelfClosingToken = false -} - -// An insertion mode (section 12.2.3.1) is the state transition function from -// a particular state in the HTML5 parser's state machine. It updates the -// parser's fields depending on parser.tok (where ErrorToken means EOF). -// It returns whether the token was consumed. -type insertionMode func(*parser) bool - -// setOriginalIM sets the insertion mode to return to after completing a text or -// inTableText insertion mode. -// Section 12.2.3.1, "using the rules for". -func (p *parser) setOriginalIM() { - if p.originalIM != nil { - panic("html: bad parser state: originalIM was set twice") - } - p.originalIM = p.im -} - -// Section 12.2.3.1, "reset the insertion mode". 
-func (p *parser) resetInsertionMode() { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if i == 0 && p.context != nil { - n = p.context - } - - switch n.DataAtom { - case a.Select: - p.im = inSelectIM - case a.Td, a.Th: - p.im = inCellIM - case a.Tr: - p.im = inRowIM - case a.Tbody, a.Thead, a.Tfoot: - p.im = inTableBodyIM - case a.Caption: - p.im = inCaptionIM - case a.Colgroup: - p.im = inColumnGroupIM - case a.Table: - p.im = inTableIM - case a.Head: - p.im = inBodyIM - case a.Body: - p.im = inBodyIM - case a.Frameset: - p.im = inFramesetIM - case a.Html: - p.im = beforeHeadIM - default: - continue - } - return - } - p.im = inBodyIM -} - -const whitespace = " \t\r\n\f" - -// Section 12.2.5.4.1. -func initialIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - n, quirks := parseDoctype(p.tok.Data) - p.doc.AppendChild(n) - p.quirks = quirks - p.im = beforeHTMLIM - return true - } - p.quirks = true - p.im = beforeHTMLIM - return false -} - -// Section 12.2.5.4.2. -func beforeHTMLIM(p *parser) bool { - switch p.tok.Type { - case DoctypeToken: - // Ignore the token. - return true - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - if p.tok.DataAtom == a.Html { - p.addElement() - p.im = beforeHeadIM - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - } - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false -} - -// Section 12.2.5.4.3. -func beforeHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Head: - p.addElement() - p.head = p.top() - p.im = inHeadIM - return true - case a.Html: - return inBodyIM(p) - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.4. -func inHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. 
- p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: - p.addElement() - p.oe.pop() - p.acknowledgeSelfClosingTag() - return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: - p.addElement() - p.setOriginalIM() - p.im = textIM - return true - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head: - n := p.oe.pop() - if n.DataAtom != a.Head { - panic("html: bad parser state: element not found, in the in-head insertion mode") - } - p.im = afterHeadIM - return true - case a.Body, a.Html, a.Br: - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.6. -func afterHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Body: - p.addElement() - p.framesetOK = false - p.im = inBodyIM - return true - case a.Frameset: - p.addElement() - p.im = inFramesetIM - return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: - p.oe = append(p.oe, p.head) - defer p.oe.remove(p.head) - return inHeadIM(p) - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Body, a.Html, a.Br: - // Drop down to creating an implied tag. - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) - p.framesetOK = true - return false -} - -// copyAttributes copies attributes of src not found on dst to dst. -func copyAttributes(dst *Node, src Token) { - if len(src.Attr) == 0 { - return - } - attr := map[string]string{} - for _, t := range dst.Attr { - attr[t.Key] = t.Val - } - for _, t := range src.Attr { - if _, ok := attr[t.Key]; !ok { - dst.Attr = append(dst.Attr, t) - attr[t.Key] = t.Val - } - } -} - -// Section 12.2.5.4.7. -func inBodyIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - d := p.tok.Data - switch n := p.oe.top(); n.DataAtom { - case a.Pre, a.Listing: - if n.FirstChild == nil { - // Ignore a newline at the start of a
<pre> block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
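		// Note: framesetOK is the spec's "frameset-ok" flag. Once any
		// non-whitespace text has been inserted, a later <frameset> start
		// tag can no longer replace the body (see the a.Frameset case below).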
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form == nil {
-				p.popUntil(buttonScope, a.P)
-				p.addElement()
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
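			// Net effect of the loop above: a new <li> implicitly closes
			// any still-open <li>, so "<ul><li>a<li>b</ul>" parses as two
			// sibling list items rather than nested ones.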
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A)
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
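			// Consequence of the scan above: anchors never nest. In
			// "<a>1<a>2", the second <a> first closes the first via the
			// adoption agency, yielding two sibling <a> elements.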
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr)
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Isindex:
-			if p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			action := ""
-			prompt := "This is a searchable index. Enter search keywords: "
-			attr := []Attribute{{Key: "name", Val: "isindex"}}
-			for _, t := range p.tok.Attr {
-				switch t.Key {
-				case "action":
-					action = t.Val
-				case "name":
-					// Ignore the attribute.
-				case "prompt":
-					prompt = t.Val
-				default:
-					attr = append(attr, t)
-				}
-			}
-			p.acknowledgeSelfClosingTag()
-			p.popUntil(buttonScope, a.P)
-			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
-			if action != "" {
-				p.form.Attr = []Attribute{{Key: "action", Val: action}}
-			}
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
-			p.addText(prompt)
-			p.addChild(&Node{
-				Type:     ElementNode,
-				DataAtom: a.Input,
-				Data:     a.Input.String(),
-				Attr:     attr,
-			})
-			p.oe.pop()
-			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
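			// Taken together, the implied tokens above rewrite the legacy
			//   <isindex prompt="Find:" action="/s">
			// into
			//   <form action="/s"><hr><label>Find:<input name="isindex"></label><hr></form>
			// before normal in-body processing resumes.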
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Iframe:
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Noembed, a.Noscript:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
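			// Example of the adjustments above: the tokenizer lower-cases
			// attribute names, so <svg viewBox="0 0 1 1"> arrives with key
			// "viewbox"; svgAttributeAdjustments restores the camel-cased
			// "viewBox" required in the SVG namespace.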
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			node := p.form
-			p.form = nil
-			i := p.indexOfElementInScope(defaultScope, a.Form)
-			if node == nil || i == -1 || p.oe[i] != node {
-				// Ignore the token.
-				return true
-			}
-			p.generateImpliedEndTags()
-			p.oe.remove(node)
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
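			// The implied start tag above means that a stray "</p>" with no
			// open paragraph still produces an (empty) <p> element, as the
			// spec requires.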
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	}
-
-	return true
-}
-
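The large switch removed above implements the HTML5 "in body" insertion mode. Judging by the identifiers, this vendored file is golang.org/x/net/html's parse.go, so the deleted logic remains reachable through that package's public API. A minimal runnable sketch under that assumption; the input and expected serialization are illustrative, not taken from this patch:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// A second <p> start tag implies an end tag for the still-open <p>,
	// the kind of implied-token handling inBodyIM performs.
	doc, err := html.Parse(strings.NewReader("<p>one<p>two"))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// Expected, roughly:
	// <html><head></head><body><p>one</p><p>two</p></body></html>
}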
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-4. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 5. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom)
-			return
-		}
-		// Step 7. Ignore the tag if the formatting element is not in the
-		// stack of open elements.
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		// Step 8. Ignore the tag if the formatting element is not in scope.
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Steps 9-10. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 11-12. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 13. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
-			x--
-			node = p.oe[x]
-			// Steps 13.4-13.5.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 13.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 13.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 13.10.
-			lastNode = node
-		}
-
-		// Step 14. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 15-17. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 18. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 19. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
-
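The loop above is the spec's "adoption agency" algorithm, which repairs misnested formatting tags. A minimal sketch of its observable effect, again assuming the vendored package is golang.org/x/net/html; the input is the spec's own <b>/<p> example and the expected output is indicative:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

// render parses the input as a full document and serializes it back.
func render(in string) string {
	doc, err := html.Parse(strings.NewReader(in))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	return buf.String()
}

func main() {
	// </b> arrives while <p> (a "special" element, the furthest block)
	// sits above <b> on the open-element stack, so <b> is cloned around
	// the <p>'s leading content.
	fmt.Println(render("<b>1<p>2</b>3</p>"))
	// The body should serialize roughly as: <b>1</b><p><b>2</b>3</p>
}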
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM:
-// it pops the stack of open elements up to a matching tag, but stops and
-// ignores the token if it reaches a "special" element first.
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inbody
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == tagAtom {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
-
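A small sketch of the stop-at-special behavior that inBodyEndTagOther implements, under the same golang.org/x/net/html assumption; the expected output is indicative:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </span> cannot match across the open <div> (a "special" element),
	// so the end tag is ignored and "c" stays inside the div.
	doc, err := html.Parse(strings.NewReader("<span>a<div>b</span>c"))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// Expected body content, roughly: <span>a<div>bc</div></span>
}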
-// Section 12.2.5.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a